content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ped_convert.R \name{as.matrix.ped} \alias{as.matrix.ped} \alias{restore_ped} \title{Convert ped to matrix} \usage{ \method{as.matrix}{ped}(x, include.attrs = TRUE, ...) restore_ped(x, attrs = NULL, validate = TRUE) } \arguments{ \item{x}{a \code{ped} object. In \code{restore_ped}: A numerical matrix.} \item{include.attrs}{a logical indicating if marker annotations and other info should be attached as attributes. See value.} \item{\dots}{not used.} \item{attrs}{a list containing labels and other \code{ped} info compatible with \code{x}, in the format produced by \code{as.matrix}. If NULL, the attributes of \code{x} itself are used.} \item{validate}{a logical, forwarded to \code{\link[=ped]{ped()}}. If FALSE, no checks for pedigree errors are performed.} } \value{ For \code{as.matrix}: A numerical matrix with \code{pedsize(x)} rows. If \code{include.attrs = TRUE} the following attributes are added to the matrix, allowing \code{x} to be exactly reproduced by \code{restore_ped}: \itemize{ \item \code{FAMID} the family identifier (a string) \item \code{LABELS} the ID labels (a character vector) \item \code{UNBROKEN_LOOPS} a logical indicating whether \code{x} has unboken loops \item \code{LOOP_BREAKERS} a numerical matrix, or NULL \item \code{markerattr} a list of length \code{nMarkers(x)}, containing the attributes of each marker } For \code{restore_ped}: A \code{ped} object. } \description{ Converts a \code{ped} object to a numeric matrix using internal labels, with additional info neccessary to recreate the original \code{ped} attached as attributes. } \details{ \code{restore_ped} is the reverse of \code{as.matrix.ped}. } \examples{ x = relabel(nuclearPed(1), letters[1:3]) # To examplify the ped -> matrix -> ped trick, we show how to # reverse the internal ordering of the pedigree. 
m = as.matrix(x, include.attrs=TRUE) m[] = m[3:1, ] # Must reverse the labels also: attrs = attributes(m) attrs$LABELS = rev(attrs$LABELS) # Restore ped: y = restore_ped(m, attrs=attrs) # Of course a simpler way is use reorderPed(): z = reorderPed(x, 3:1) stopifnot(identical(y, z)) } \seealso{ \code{\link[=ped]{ped()}} } \author{ Magnus Dehli Vigeland }
/man/as.matrix.ped.Rd
no_license
luansheng/pedtools
R
false
true
2,332
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ped_convert.R \name{as.matrix.ped} \alias{as.matrix.ped} \alias{restore_ped} \title{Convert ped to matrix} \usage{ \method{as.matrix}{ped}(x, include.attrs = TRUE, ...) restore_ped(x, attrs = NULL, validate = TRUE) } \arguments{ \item{x}{a \code{ped} object. In \code{restore_ped}: A numerical matrix.} \item{include.attrs}{a logical indicating if marker annotations and other info should be attached as attributes. See value.} \item{\dots}{not used.} \item{attrs}{a list containing labels and other \code{ped} info compatible with \code{x}, in the format produced by \code{as.matrix}. If NULL, the attributes of \code{x} itself are used.} \item{validate}{a logical, forwarded to \code{\link[=ped]{ped()}}. If FALSE, no checks for pedigree errors are performed.} } \value{ For \code{as.matrix}: A numerical matrix with \code{pedsize(x)} rows. If \code{include.attrs = TRUE} the following attributes are added to the matrix, allowing \code{x} to be exactly reproduced by \code{restore_ped}: \itemize{ \item \code{FAMID} the family identifier (a string) \item \code{LABELS} the ID labels (a character vector) \item \code{UNBROKEN_LOOPS} a logical indicating whether \code{x} has unboken loops \item \code{LOOP_BREAKERS} a numerical matrix, or NULL \item \code{markerattr} a list of length \code{nMarkers(x)}, containing the attributes of each marker } For \code{restore_ped}: A \code{ped} object. } \description{ Converts a \code{ped} object to a numeric matrix using internal labels, with additional info neccessary to recreate the original \code{ped} attached as attributes. } \details{ \code{restore_ped} is the reverse of \code{as.matrix.ped}. } \examples{ x = relabel(nuclearPed(1), letters[1:3]) # To examplify the ped -> matrix -> ped trick, we show how to # reverse the internal ordering of the pedigree. 
m = as.matrix(x, include.attrs=TRUE) m[] = m[3:1, ] # Must reverse the labels also: attrs = attributes(m) attrs$LABELS = rev(attrs$LABELS) # Restore ped: y = restore_ped(m, attrs=attrs) # Of course a simpler way is use reorderPed(): z = reorderPed(x, 3:1) stopifnot(identical(y, z)) } \seealso{ \code{\link[=ped]{ped()}} } \author{ Magnus Dehli Vigeland }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/internet_browser.R \name{internet_browser} \alias{internet_browser} \title{Generate Random Vector of Internet Browsers} \usage{ internet_browser(n, x = c("Chrome", "IE", "Firefox", "Safari", "Opera", "Android"), prob = c(0.5027, 0.175, 0.1689, 0.0994, 0.017, 0.0132), name = "Browser") } \arguments{ \item{n}{The number elements to generate. This can be globally set within the environment of \code{r_data_frame} or \code{r_list}.} \item{x}{A vector of elements to chose from.} \item{prob}{A vector of probabilities to chose from.} \item{name}{The name to assign to the output vector's \code{varname} attribute. This is used to auto assign names to the column/vector name when used inside of \code{r_data_frame} or \code{r_list}.} } \value{ Returns a random factor vector of Internet browser elements. } \description{ Generate a random vector of Internet browser. } \details{ The browser use and probabilities (from \url{http://gs.statcounter.com}): \tabular{lr}{ \bold{ Browser} \tab \bold{Percent} \cr Chrome \tab 50.27 \%\cr IE \tab 17.50 \% \cr Firefox \tab 16.89 \%\cr Safari \tab 9.94 \% \cr Opera \tab 1.70 \% \cr Android \tab 1.32 \% \cr } } \examples{ internet_browser(20) barplot(table(internet_browser(10000))) pie(table(internet_browser(10000))) } \references{ \url{http://www.pewforum.org/2012/12/18/table-religious-composition-by-country-in-numbers} } \seealso{ Other variable functions: \code{\link{age}}, \code{\link{animal}}, \code{\link{answer}}, \code{\link{area}}, \code{\link{car}}, \code{\link{children}}, \code{\link{coin}}, \code{\link{color}}, \code{\link{date_stamp}}, \code{\link{death}}, \code{\link{dice}}, \code{\link{dna}}, \code{\link{dob}}, \code{\link{dummy}}, \code{\link{education}}, \code{\link{employment}}, \code{\link{eye}}, \code{\link{grade_level}}, \code{\link{grade}}, \code{\link{group}}, \code{\link{hair}}, \code{\link{height}}, \code{\link{income}}, \code{\link{iq}}, 
\code{\link{language}}, \code{\link{level}}, \code{\link{likert}}, \code{\link{lorem_ipsum}}, \code{\link{marital}}, \code{\link{military}}, \code{\link{month}}, \code{\link{name}}, \code{\link{normal}}, \code{\link{political}}, \code{\link{race}}, \code{\link{religion}}, \code{\link{sat}}, \code{\link{sentence}}, \code{\link{sex_inclusive}}, \code{\link{sex}}, \code{\link{smokes}}, \code{\link{speed}}, \code{\link{state}}, \code{\link{string}}, \code{\link{upper}}, \code{\link{valid}}, \code{\link{year}}, \code{\link{zip_code}} } \keyword{browser}
/man/internet_browser.Rd
no_license
iqis/wakefield
R
false
true
2,632
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/internet_browser.R \name{internet_browser} \alias{internet_browser} \title{Generate Random Vector of Internet Browsers} \usage{ internet_browser(n, x = c("Chrome", "IE", "Firefox", "Safari", "Opera", "Android"), prob = c(0.5027, 0.175, 0.1689, 0.0994, 0.017, 0.0132), name = "Browser") } \arguments{ \item{n}{The number elements to generate. This can be globally set within the environment of \code{r_data_frame} or \code{r_list}.} \item{x}{A vector of elements to chose from.} \item{prob}{A vector of probabilities to chose from.} \item{name}{The name to assign to the output vector's \code{varname} attribute. This is used to auto assign names to the column/vector name when used inside of \code{r_data_frame} or \code{r_list}.} } \value{ Returns a random factor vector of Internet browser elements. } \description{ Generate a random vector of Internet browser. } \details{ The browser use and probabilities (from \url{http://gs.statcounter.com}): \tabular{lr}{ \bold{ Browser} \tab \bold{Percent} \cr Chrome \tab 50.27 \%\cr IE \tab 17.50 \% \cr Firefox \tab 16.89 \%\cr Safari \tab 9.94 \% \cr Opera \tab 1.70 \% \cr Android \tab 1.32 \% \cr } } \examples{ internet_browser(20) barplot(table(internet_browser(10000))) pie(table(internet_browser(10000))) } \references{ \url{http://www.pewforum.org/2012/12/18/table-religious-composition-by-country-in-numbers} } \seealso{ Other variable functions: \code{\link{age}}, \code{\link{animal}}, \code{\link{answer}}, \code{\link{area}}, \code{\link{car}}, \code{\link{children}}, \code{\link{coin}}, \code{\link{color}}, \code{\link{date_stamp}}, \code{\link{death}}, \code{\link{dice}}, \code{\link{dna}}, \code{\link{dob}}, \code{\link{dummy}}, \code{\link{education}}, \code{\link{employment}}, \code{\link{eye}}, \code{\link{grade_level}}, \code{\link{grade}}, \code{\link{group}}, \code{\link{hair}}, \code{\link{height}}, \code{\link{income}}, \code{\link{iq}}, 
\code{\link{language}}, \code{\link{level}}, \code{\link{likert}}, \code{\link{lorem_ipsum}}, \code{\link{marital}}, \code{\link{military}}, \code{\link{month}}, \code{\link{name}}, \code{\link{normal}}, \code{\link{political}}, \code{\link{race}}, \code{\link{religion}}, \code{\link{sat}}, \code{\link{sentence}}, \code{\link{sex_inclusive}}, \code{\link{sex}}, \code{\link{smokes}}, \code{\link{speed}}, \code{\link{state}}, \code{\link{string}}, \code{\link{upper}}, \code{\link{valid}}, \code{\link{year}}, \code{\link{zip_code}} } \keyword{browser}
\name{m} \alias{m} double2 \title{ multiply } \description{ test run double } \usage{ m(a,...) } value to double \arguments{ na2 \item{a}{ numeric } } \details{ NA } \value{ test } \references{ shen } \author{ sheen } \note{ ho } \seealso{ gffgg } \examples{ d 3; function (a) { s <- a + a return(s) } } double \keyword{test3 } \keyword{ f-test }
/man/m.Rd
no_license
ArefinSheen/f1
R
false
false
363
rd
\name{m} \alias{m} double2 \title{ multiply } \description{ test run double } \usage{ m(a,...) } value to double \arguments{ na2 \item{a}{ numeric } } \details{ NA } \value{ test } \references{ shen } \author{ sheen } \note{ ho } \seealso{ gffgg } \examples{ d 3; function (a) { s <- a + a return(s) } } double \keyword{test3 } \keyword{ f-test }
# load the necessary libraries library(shiny) library(shinydashboard) library(ggplot2) library(reshape2) ####################################################################################### ########################### START OF DASHBOARDPAGE #################################### ####################################################################################### ui <- dashboardPage( ############################ SIDEBAR ########################################## # sidebar title + upper part dashboardHeader(title = "Polya's urn Dashboard"), dashboardSidebar( # sidebar 'body' sidebarMenu(id="tabs", # this part below is needed so that the tab items stay active and clicking on a new active tab item updates the UI and the user actually gets to the page he selected sidebarMenuOutput("menu"), tags$head( tags$script( HTML( " $(document).ready(function(){ // Bind classes to menu items, easiet to fill in manually var ids = ['dashboard','Pr??diction','Interpr??tation']; for(i=0; i<ids.length; i++){ $('a[data-value='+ids[i]+']').addClass('my_subitem_class'); } // Register click handeler $('.my_subitem_class').on('click',function(){ // Unactive menuSubItems $('.my_subitem_class').parent().removeClass('active'); }) }) " ) ) ) ) ), # end of dashboardSidebar ############################ DASHBOARDBODY ########################################## dashboardBody( tabItems( ################################## PAGE 1 tabItem(tabName = "m1", tabsetPanel( ################ TAB 1 - THEORY PART tabPanel("Introduction to the theory", ################ PAGE 1 / TAB 1 - THE THEORY PART # title and HTML description of the Polya's urn model and the dashboard (this page consists of text only) HTML( "<h1 align = 'center'><b> Polya's urn model - An introduction </h1></b><br> <h3> In statistics, Polya's urn model (also called Polya's urn), named after George Polya, is a type of statistical model. In this framework, objects of real interest (such as behaviour, memories, or even cars etc.) 
can be represented as differently colored balls. These 'objects of interest' are mixed in an urn, which contains x white and y black balls. At each iteration, one ball is drawn randomly. Its colour is observed and the ball gets returned, together with an additionally ball from the same colour. </h3> <br> <h3> Polya's urn model can thus simulate many phenomena, including, but not limited to:</h3> <br><ul><h3> <li> 'The rich get richer, the poor get poorer... ' demonstrating that it is easier to make more money if you have $10000 compared to $10 </li> <li> Learning, where the black and white balls represent good and bad 'memories' for the subject at hand </li> <li> Popularity of a brand, where two brands are initially equally good but when one takes off, it can rather suddenly dominate the market</li> </h3></ul> <h3><br> In this simulation, Polya's urn model will be used to demonstrate habit formation. The colour white will represent how often the good habit was shown and the colour black will stand for the bad habit. Each ball will correspond to one instance where a person displayed the (good or the bad) habit. </h3> <br><br><h1 align = 'center'><b> An introduction to this dashboard </h3></b><br> <h3> You will have the opportunity to play around with several different features to simulate habit formation. The next page displays the basic model, where you can choose: <ul><br><li> The initial number of balls for both colors </li> <li> The number of iterations. Each iteration represents one ball getting picked (randomly) and put back together with an additional ball of the same colour</li> <li> The number of trials (also called runs). 
This represents how often you would to replicate process of having an initial set of white and black balls getting picked from the urn </li></h3></ul> <h3> After having played around a bit with the basic model, the rest of the tabs contain a variety of extra features, such as: </h3> <h3><ul> <li> Feedback - you will be able to change how many balls get put into the urn after each iteration </li> <li> Forgetting - you can simluate a brain injury or simply time, where the habit had not been practiced and the memories/abilites faded </li> <li> Streak - you can simulate that after a number of solely good or bad instances, additional balls of the same colour be added to the urn </li> </h3> <h3> In the end, there is a quiz where you can test what you have learned.</h3></ul> " ) ), ############## PAGE 1 / TAB 2 - INTrO TO POLYAS URN WITH GRAPH tabPanel("The basic model", # title of the tab HTML( # main title of the page "<h1 align ='center'><b> The basic Polya's urn model </b></h3><br><br><br>"), fluidRow( # fluidRow encompasses the various boxes the site is made up of box( # this box contains the information necessary to understand the functionality of this page HTML( # the text includes the user guide for the input and some examples regarding the theory (how habits develop) "<h4> In the basic Polya's urn model you can choose how many white and black balls (or rather good and bad displays of a habit) you would like to start out with. Starting with 1 ball of each, will yield the most obvious results, since putting new balls into the urn will have a bigger impact on the proportion of white and black balls, than if you start with e.g. 100 of each. </h4> <h4> Furthermore, you can also choose how many iterations you want. Remember, each iteration equals to a.) drawing a ball at random from the urn, b.) putting it back together with an additional ball of the same colour. Thus, having a higher number of iterations means that more balls will be drawn and added back to the urn. 
</h4> <h4> You can also determine the number of runs (or trials), which corresponds to how often you would like to see the scenario play out. With fewer runs, you can see the iterations that each one goes through better. Conversely, with many runs, you can see how the probabilities work and which direction the proportion will tend to go to.</h4> <h4> Try experimenting with changing the proportions and see what happens when you start with e.g. 10 balls of each color, or if you have twice as many black balls compared to white balls, etc. Also think about how this corresponds to habit formation. What if, for example, a child has a bad habit (i.e. biting nails) that he has been doing for a long time and has thus many times displayed the bad habit? </h4> <br><h4> The range for all input variables is the following: <ul><li> The starting number of black and white balls: 1-500 </li> <li>The number of iterations: 1-500</li><li> The number of runs: 1-100</li></ul></h4> ") ), box( # this box contains the user input fields # title of the box title = "Select the input for the urn", numericInput(inputId = "BB1", label = "Number of black balls", 1, min = 1, max = 500), # input for the number of white balls numericInput(inputId = "WB1", label= "Number of white balls", 1, min = 1, max = 500), # input for the number of iterations numericInput(inputId = "nIt1", label= "Number of iterations", 50, min = 5, max = 500), # input for the number of runs/trials numericInput(inputId = "nRun1", label= "Number of runs", 10, min = 1, max = 50) ) ), ## the plot - reactive & is determined by the user input from the box above plotOutput("plot1", height = 600, width = 1810) ) ) ), ##################### TAB 2 - FEEDBACK tabItem(tabName = "m2", # title of the page HTML( "<h1 align ='center'><b> Polya's urn model and the power of feedback </b></h3><br><br><br>"), # fluidRow here too contains the boxes that make up the page (except for the plot, which comes after fluidRow) fluidRow( box( # this box 
contains the description of the feedback, with examples and explains how to handle the input HTML( "<h4> Additionally to the functionality you saw previously (number of balls to start with, number of iterations and runs), here you have an additional feature, namely 'feedback'. In this tab you can determine how many white and black balls should be put to the urn, when one got drawn. </h4> <h4> The standard is 1, but you can try out what happens when starting out with e.g. 1 ball of each color, but you put back 10 white balls and only 1 black ball when a ball of that color gets chosen. </h4> <h4> Translating this to our example, habits, you can imagine that a white ball would represent positive reward as a result of the good behaviour and the black ball would then represent a negative reward. </h4> <h4> Try to see how many white balls you would need to add to the urn after a ball gets drawn, when you start with twice as many black balls. Also try, how the initial number of balls influences this. For example, does starting with 4 black and 2 balls vs 200 black and 100 white balls make a difference when you can just double the number of white balls for the feedback (or increase tenfold)? </h4> <br><h4> The range for all input variables is the following: <ul><li> The starting number of black and white balls: 1-500 </li> <li>The number of iterations: 1-500</li><li> The number of runs: 1-100</li><li> The amount of balls for the feedback (for both colours): 1-20. 
</li></ul></h4> ") ), box( # this box contains the input for the initial urn and also for the feedback # title of the box title = "Select the input for the urn", # input for the number of black balls numericInput(inputId = "BB2", label = "Number of black balls", 1, min = 1, max = 500), # input for the number of white balls numericInput(inputId = "WB2", label= "Number of white balls", 1, min = 1, max = 500), # input for the number of iterations numericInput(inputId = "nIt2", label= "Number of iterations", 50, min = 5, max = 500), # input for the number of runs/trials numericInput(inputId = "nRun2", label= "Number of runs", 10, min = 1, max = 100), # slider for the number of BBs as feedback numericInput(inputId = "BB_feedback2", label = "Feedback for black balls", 1, min = 1, max = 20), # slider for the number of WBs as feedback numericInput(inputId = "WB_feedback2", label = "Feedback for white balls", 1, min = 1, max = 20) ) ), # the code for the plot plotOutput("plot2", height = 600, width = 1810) ), ######################## TAB 3 - FORGETTING tabItem(tabName = "m3", # title of the page HTML( "<h1 align ='center'><b> The effect of forgetting on the urn </b></h3><br><br><br>"), # fluidRow contains the boxes that make up the page (except for the plot, which comes after fluidRow) fluidRow( box( # this box contains the info text for the user HTML( "<h4> In this tab you can determine after how many iterations forgetting should take place.</h4> <br><h4> Be careful that you consider how many iterations you want to have and choose a number that is less than that. Similarly, when determining how many instances of behaviour (i.e. balls) you want our hypothetical person to 'forget', choose a number that is less than the total number of balls in the urn at that time. </h4> <h4> Translating this to our example, habits, you can imagine how a head trauma could affect one's strive to eliminate (or at least limit) one's proportion of good habit displayals. 
Try different scenarios where you see how the timing of forgetting and what proportion of balls are eliminated influence the graph. </h4> <br><h4> The range for all input variables is the following: <ul><li> The starting number of black and white balls: 1-500 </li> <li>The number of iterations: 1 to 500</li><li> The number of runs: 1-100</li><li> The number of iterations after which forgetting can take place: 5-400.</li> <li>The number of balls that can be 'forgotten': 5-400.</li></ul></h4> <br><h4><b> Pay attention that the iteration after which forgetting takes places is still within the range of iterations you chose for the urn.</b></h4><br> ") ), box(# this box contains the input for the initial urn and also for the forgetting functionality # title of the box title = "Select the input for the urn", # input for the number of black balls numericInput(inputId = "BB3", label = "Number of black balls", 1, min = 1, max = 500), # input for the number of white balls numericInput(inputId = "WB3", label= "Number of white balls", 1, min = 1, max = 500), # input for the number of iterations numericInput(inputId = "nIt3", label= "Number of iterations", 50, min = 5, max = 500), # input for the number of runs numericInput(inputId = "nRun3", label= "Number of runs", 10, min = 1, max = 50), # slider for the number of BBs as feedback numericInput(inputId = "forget_when3", label = "After how many iterations should forgetting take place?", 20, min = 5, max = 400), # slider for the number of WBs as feedback numericInput(inputId = "forget_howmany3", label = "How many instances to forget?", 10, min = 5, max = 400) ) ), # the plot plotOutput("plot3", height = 500, width = 1810) ), #################### TAB 4 - STREAK tabItem(tabName = "m4", # title of the page HTML( "<h1 align ='center'><b> The effect of a streak </b></h3><br><br><br>"), # fluidRow contains the boxes that make up the page (except for the plot, which comes after fluidRow) fluidRow( box( # this part contains the 
information text for the user HTML( "<h4> In this tab you can determine after how many iterations forgetting should take place.</h4> <br><h4> Consider how many balls of each color you have in the urn. What effect would having much more white balls and a high number of balls as a reward for a streak mean? </h4> <br> <h4> Thinking about habits in this way, one can imagine that after many succesful times that you managed to fight a bad habit, you get an extra boost. Alternatively, after having displayed a bit habit ten times in a row, one might feel like giving up fighting it. </h4> <br><h4> The range for all input variables is the following: <ul><li> The starting number of black and white balls: 1-500 </li> <li>The number of iterations: 1-500</li><li> The number of runs: 1-100</li><li> The number of balls that need to be picked of the same color that should constitute a streak: 4-50 </li> <li>The number of balls that are added to the urn after a streak: 5-100</li></ul></h4> ") ), box( # this box contains the input for the initial urn and also for the streak title = "Select the input for the urn", numericInput(inputId = "BB4", label = "Number of black balls", 1, min = 1, max = 500), # input for the number of white balls numericInput(inputId = "WB4", label= "Number of white balls", 1, min = 1, max = 500), # input for the number of iterations numericInput(inputId = "nIt4", label= "Number of iterations", 50, min = 5, max = 500), # input for the number of runs/trials numericInput(inputId = "nRun4", label= "Number of runs", 10, min = 1, max = 100), # input for when the streak should happen numericInput(inputId = "streak_when4", label = "When should the streak happen?", 10, min = 4, max = 50), # input for the number of balls that should be added to the urn when the streak happens numericInput(inputId = "streak_howmany4", label= "How many balls to add when the streak happens?", 5, min = 5, max = 100) ) ), # the plot plotOutput("plot4", height=500, width= 1810) ), 
################## TAB 5 - QUIZZES tabItem(tabName = "m5", fluidRow( HTML( # title of the page "<h1 align ='center'><b> Quiz </b></h3><br><br><br>"), box(status = "primary", title = "Answer these questions to see what you've learned", # the blue header of the quiz box with title solidHeader = TRUE, width = 8, ################## QUESTION 1 fluidRow( column(6, # the radioButton contains the id of the question, the question iteself and the anser choices, including 3 choices and a 'null' answer radioButtons("q1", "How can you increase the proportion of white balls?", c("Start with more white balls" = "a", "Put more black balls back after one had been drawn" = "b", "Forgetting should take place after half the iterations" = "c", "Select one" = "null"), selected = "null" ), uiOutput("r1") ), ################## QUESTION 2 column(6, # the radioButton contains the id of the question, the question iteself and the anser choices, including 3 choices and a 'null' answer radioButtons("q2", "How would you increase the proportion of black balls?", c("Start with 2 white balls and one black ball but put 5 black balls and only 1 black ball as feedback" = "a", " Alter how many balls to add when a streak happens" = "b", "Forgetting should only take place for white balls" = "c", "Select one" = "null"), selected = "null" ), uiOutput("r2") ) ), ################## QUESTION 3 fluidRow( column(6, # the radioButton contains the id of the question, the question iteself and the anser choices, including 3 choices and a 'null' answer radioButtons("q3", "When does forgetting have a (relatively) high impact?", c("If it happens early in the trial" = "a", "When you get lucky and only balls of one colour get forgotten" = "b", "If most balls up to that point are 'forgotten'" = "c", "Select one" = "null"), selected = "null" ), uiOutput("r3") ), ################## QUESTION 4 column(6, # the radioButton contains the id of the question, the question iteself and the anser choices, including 3 choices and a 
'null' answer radioButtons("q4", " When is the proportion of the two colours relatively stable ", c("When you start with few balls of each" = "a", "When the urn is composed of approximately the same number of balls" = "b", "At nighttime" = "c", "Select one" = "null"), selected = "null" ), uiOutput("r4") ) ) ), # this is the code for the 'box' on the right side of the page containing the 3 images showing the statistics of the answers box(status = "primary", title = "Your results", verticalLayout( infoBoxOutput("attemptBox", width = 14), # the attempt box infoBoxOutput("solvedBox", width = 14), # the '% solved' box infoBoxOutput("correctBox", width = 14) # the '% correct' box ), # the box's measures width = 4, height = 390 ) ) ) ) ) ) ####################################################################################### ############################ START OF THE SERVER ###################################### ####################################################################################### server <- function(input, output,session) { # SIDEBAR items, including the name of each page (as displayed in the sidebar), its id and icon output$menu <- renderMenu({ sidebarMenu( # introduction sidebar menuItem("Introduction", tabName="m1", icon = icon("home")), # feedback sidebar menuItem("Feedback", tabName="m2", icon = icon("envelope")), # forgetting sidebar menuItem("Forgetting", tabName="m3", icon = icon("users")), # streak sidebar menuItem("Streak", tabName="m4", icon = icon("line-chart")), # quiz sidebar menuItem("Quiz", tabName="m5", icon = icon("check")) ) }) # isolate({updateTabItems(session, "tabs", "m4")}) # if you want the dashboard to open with a specific tab number (ohter than the first) ####### PLOT 1 - the basic output$plot1 <- renderPlot ({ #### the actual code that simulates the urn source("plotfun.R") # this is necessary since the function is in this R file and this line copies the function into this file # since we are calling a function from another 
file, it is better to put all the user inputted data into variables in this R file, because you can't do that in the function call, as it doesn't recognise 'input$' # need to name these variables separately for each plot so that i.e. WB in the first dashboard won't affect WB in the 2nd nBB1 <- input$BB1 nWB1 <- input$WB1 nIt1 <- input$nIt1 nRun1 <- input$nRun1 # calling the function basicPlot from plotfun.R with all the inputs --- M is a matrix that contains the datapoints from which the plot will be made M <- basicPlot(nBB1, nWB1, nIt1, nRun1) # make the data ready for visualisation df <- as.data.frame(M) #transform matrix into a data frame df$id = 1:nrow(df) # name the rows final_data <- melt(df, id='id') # putting the data into long format names(final_data) <- c('id', 'Runs', 'value') # naming the columns so they can be used in the ggplot # the actual code for the ggplot g <- ggplot() + geom_line(data = final_data, aes(x = id, y = value, color = Runs, group = Runs), size = 1) # the title, subtitle for the plot g <- g + labs(title="Polya's Urn Model Graph", subtitle="Showing the proportion of white balls in the urn", y="Proportion of white balls", x="Iterations", caption="") # PLOT it plot(g) }) ##### PLOT 2 output$plot2 <- renderPlot ({ #### the actual code that simulates the urn source("feedbackPlot.R") # this is necessary since the function is in this R file and this line copies the function into this file # put all the user-inputted data into fixed variables for each input nBB2 <- input$BB2 # need to name these variables separately so that i.e. 
WB in the first dashboard won't affect WB in the 2nd nWB2 <- input$WB2 nIt2 <- input$nIt2 nRun2 <- input$nRun2 BB_feedback2 <- input$BB_feedback2 WB_feedback2 <- input$WB_feedback2 # calling the function feedbackPlot from feedbackPlot.R with all the inputs --- M is a matrix that contains the datapoints from which the plot will be made M <- feedbackPlot(nBB2, nWB2, nIt2, nRun2, BB_feedback2, WB_feedback2) # make the data ready for visualisation df <- as.data.frame(M) #transform matrix into a data frame df$id = 1:nrow(df) # name the rows final_data <- melt(df, id='id') # putting the data into long format names(final_data) <- c('id', 'Runs', 'value') # naming the columns so they can be used in the ggplot # the actual code for the ggplot g <- ggplot() + geom_line(data = final_data, aes(x = id, y = value, color = Runs, group = Runs), size = 1) # the title, subtitle for the plot g <- g + labs(title="Polya's Urn Model Graph", subtitle="Showing the proportion of white balls in the urn", y="Proportion of white balls", x="Iterations", caption="") # PLOT it plot(g) }) ##### PLOT 3 - forgetting output$plot3 <- renderPlot ({ source("forgettingPlot.R") # this is necessary since the function is in this R file and this line copies the function into this file # put all the user-inputted data into fixed variables for each input nBB3 <- input$BB3 # need to name these variables separately so that i.e. 
WB in the first dashboard won't affect WB in the 2nd nWB3 <- input$WB3 nIt3 <- input$nIt3 nRun3 <- input$nRun3 forget_when3 <- input$forget_when3 forget_howmany3 <- input$forget_howmany3 # calling the function forgettingPlot from forgettingPlot.R with all the inputs --- M is a matrix that contains the datapoints from which the plot will be made M <- forgettingPlot(nBB3, nWB3, nIt3, nRun3, forget_when3, forget_howmany3) # make the data ready for visualisation df <- as.data.frame(M) #transform matrix into a data frame df$id = 1:nrow(df) # name the rows final_data <- melt(df, id='id') # putting the data into long format names(final_data) <- c('id', 'Runs', 'value') # naming the columns so they can be used in the ggplot # the actual code for the ggplot g <- ggplot() + geom_line(data = final_data, aes(x = id, y = value, color = Runs, group = Runs), size = 1) # the title, subtitle for the plot g <- g + labs(title="Polya's Urn Model with Option to Suffer a Trauma", subtitle="Showing the proportion of white balls in the urn", y="Proportion of white balls", x="Iterations", caption="") # PLOT it plot(g) }) #### PLOT 4 - STREAK output$plot4 <- renderPlot ({ source("streakPlot.R") # this is necessary since the function is in this R file and this line copies the function into this file # put all the user-inputted data into fixed variables for each input nBB4 <- input$BB4 # need to name these variables separately so that i.e. 
WB in the first dashboard won't affect WB in the 2nd nWB4 <- input$WB4 nIt4 <- input$nIt4 nRun4 <- input$nRun4 streak_when4 <- input$streak_when4 streak_howmany4 <- input$streak_howmany4 # calling the function streakPlot from streakPlot.R with all the inputs --- M is a matrix that contains the datapoints from which the plot will be made M <- streakPlot(nBB4, nWB4, nIt4, nRun4,streak_when4, streak_howmany4) # make the data ready for visualisation df <- as.data.frame(M) #transform matrix into a data frame df$id = 1:nrow(df) # name the rows final_data <- melt(df, id='id') # putting the data into long format names(final_data) <- c('id', 'Runs', 'value') # naming the columns so they can be used in the ggplot # the actual code for the ggplot g <- ggplot() + geom_line(data = final_data, aes(x = id, y = value, color = Runs, group = Runs), size = 1) # the title, subtitle for the plot g <- g + labs(title="Polya's Urn Model with Option to Suffer a Trauma", subtitle="Showing the proportion of white balls in the urn", y="Proportion of white balls", x="Iterations", caption="") # PLOT it plot(g) }) ################ QUIZ CODE #################### pro <- reactiveValues(data = numeric(4), work = numeric(4)) ## pro is needed to update the statistics boxes on the right ####### QUESTION 1 output$r1 <- renderText({ # if the answer is the correct one (in this case a) if(input$q1 == "a"){ # update solved and attempted boxes pro$data[1] <- 1 pro$work[1] <- 1 # display that it is correct HTML("<h5 style = 'color:green' align = 'left'><b>Correct!</b></h5>") # if nothing is selected, the dot is in the 4th option 'select one' } else if(input$q1 == "null"){ pro$work[1] <- 0 HTML("<br>") # if a wrong answer is selected (so basically not null or the correct one) then only the attempt part gets updated } else { # the solved box doesnt, but the attempt box does get updated pro$data[1] <- 0 pro$work[1] <- 1 # display that it is the incorrect choice HTML("<h5 style ='color:red' align='left'><b>Wrong! 
Try again!</b></h5>") } }) ####### QUESTION 2 output$r2 <- renderText({ # if the answer is the correct one (in this case a) if(input$q2 == "a"){ pro$data[2] <- 1 pro$work[2] <- 1 HTML("<h5 style = 'color:green' align = 'left'><b>Correct!</b></h5>") # if nothing is selected, the dot is in the 4th option 'select one' (this is the baseline) } else if(input$q2 == "null"){ pro$work[2] <- 0 HTML("<br>") # if a wrong answer is selected (so basically not null or the correct one) then only the attempt part gets updated } else { # the solved box doesnt, but the attempt box does get updated pro$data[2] <- 0 pro$work[2] <- 1 # display that it is the incorrect choice HTML("<h5 style ='color:red' align='left'><b>Wrong! Try again!</b></h5>") } }) ####### QUESTION 3 output$r3 <- renderText({ # if the answer is the correct one (in this case c) if(input$q3 == "c"){ pro$data[3] <- 1 pro$work[3] <- 1 HTML("<h5 style = 'color:green' align = 'left'><b>Correct!</b></h5>") # if nothing is selected, the dot is in the 4th option 'select one' (this is the baseline) } else if(input$q3 == "null"){ # none selected pro$work[3] <- 0 HTML("<br>") # if a wrong answer is selected (so basically not null or the correct one) then only the attempt part gets updated } else { # the solved box doesnt, but the attempt box does get updated pro$data[3] <- 0 pro$work[3] <- 1 # display that it is the incorrect choice HTML("<h5 style ='color:red' align='left'><b>Wrong! 
Try again!</b></h5>") } }) ####### QUESTION 4 output$r4 <- renderText({ # if the answer is the correct one (in this case b) if(input$q4 == "b"){ pro$data[4] <- 1 pro$work[4] <- 1 HTML("<h5 style = 'color:green' align = 'left'><b>Correct!</b></h5>") # if nothing is selected, the dot is in the 4th option 'select one' (this is the baseline) } else if(input$q4 == "null") { pro$work[4] <- 0 HTML("<br>") # if a wrong answer is selected (so basically not null or the correct one) then only the attempt part gets updated } else { # the solved box doesnt, but the attempt box does get updated pro$data[4] <- 0 pro$work[4] <- 1 # display that it is the incorrect choice HTML("<h5 style ='color:red' align='left'><b>Wrong! Try again!</b></h5>") } }) ############## BOXOUTPUT (on the quiz page, provides info about having completed the quizzes and how many questions one got right) ### questions attempted output$attemptBox <- renderInfoBox({ # title AND what to display infoBox("", "Attempted", paste(round(100*sum(pro$work)/4, 0), "%"), # icon icon = icon("pencil", lib = "glyphicon"), # color color = "orange" ) }) ### questions answered correctly output$solvedBox <- renderInfoBox({ # title AND what to display infoBox("", "Solved", paste(round(100*sum(pro$data)/4, 0), "%"), # icon icon = icon("check", lib = "glyphicon"), # color color = "green" ) }) ### percentage of correct answers output$correctBox <- renderInfoBox({ # title AND what to display if there's no numeric input if(sum(pro$data)/sum(pro$work) == "NaN"){ # what gets displayed infoBox("", "Percentage correct", paste("NA"), # icon icon = icon("thumbs-up", lib = "glyphicon"), # color color = "blue" ) } else { # title AND what to display when the value is not 0 anymore infoBox("", "Percentage correct", paste(round(sum(pro$data)/sum(pro$work)*100), "%"), # icon icon = icon("thumbs-up", lib = "glyphicon"), # color color = "blue" ) } }) } shinyApp(ui, server) ######################################## THE END OF THE CODE 
###################################################
/app.R
no_license
roni-kovacs/polyaurn
R
false
false
35,896
r
# load the necessary libraries library(shiny) library(shinydashboard) library(ggplot2) library(reshape2) ####################################################################################### ########################### START OF DASHBOARDPAGE #################################### ####################################################################################### ui <- dashboardPage( ############################ SIDEBAR ########################################## # sidebar title + upper part dashboardHeader(title = "Polya's urn Dashboard"), dashboardSidebar( # sidebar 'body' sidebarMenu(id="tabs", # this part below is needed so that the tab items stay active and clicking on a new active tab item updates the UI and the user actually gets to the page he selected sidebarMenuOutput("menu"), tags$head( tags$script( HTML( " $(document).ready(function(){ // Bind classes to menu items, easiet to fill in manually var ids = ['dashboard','Pr??diction','Interpr??tation']; for(i=0; i<ids.length; i++){ $('a[data-value='+ids[i]+']').addClass('my_subitem_class'); } // Register click handeler $('.my_subitem_class').on('click',function(){ // Unactive menuSubItems $('.my_subitem_class').parent().removeClass('active'); }) }) " ) ) ) ) ), # end of dashboardSidebar ############################ DASHBOARDBODY ########################################## dashboardBody( tabItems( ################################## PAGE 1 tabItem(tabName = "m1", tabsetPanel( ################ TAB 1 - THEORY PART tabPanel("Introduction to the theory", ################ PAGE 1 / TAB 1 - THE THEORY PART # title and HTML description of the Polya's urn model and the dashboard (this page consists of text only) HTML( "<h1 align = 'center'><b> Polya's urn model - An introduction </h1></b><br> <h3> In statistics, Polya's urn model (also called Polya's urn), named after George Polya, is a type of statistical model. In this framework, objects of real interest (such as behaviour, memories, or even cars etc.) 
can be represented as differently colored balls. These 'objects of interest' are mixed in an urn, which contains x white and y black balls. At each iteration, one ball is drawn randomly. Its colour is observed and the ball gets returned, together with an additionally ball from the same colour. </h3> <br> <h3> Polya's urn model can thus simulate many phenomena, including, but not limited to:</h3> <br><ul><h3> <li> 'The rich get richer, the poor get poorer... ' demonstrating that it is easier to make more money if you have $10000 compared to $10 </li> <li> Learning, where the black and white balls represent good and bad 'memories' for the subject at hand </li> <li> Popularity of a brand, where two brands are initially equally good but when one takes off, it can rather suddenly dominate the market</li> </h3></ul> <h3><br> In this simulation, Polya's urn model will be used to demonstrate habit formation. The colour white will represent how often the good habit was shown and the colour black will stand for the bad habit. Each ball will correspond to one instance where a person displayed the (good or the bad) habit. </h3> <br><br><h1 align = 'center'><b> An introduction to this dashboard </h3></b><br> <h3> You will have the opportunity to play around with several different features to simulate habit formation. The next page displays the basic model, where you can choose: <ul><br><li> The initial number of balls for both colors </li> <li> The number of iterations. Each iteration represents one ball getting picked (randomly) and put back together with an additional ball of the same colour</li> <li> The number of trials (also called runs). 
This represents how often you would to replicate process of having an initial set of white and black balls getting picked from the urn </li></h3></ul> <h3> After having played around a bit with the basic model, the rest of the tabs contain a variety of extra features, such as: </h3> <h3><ul> <li> Feedback - you will be able to change how many balls get put into the urn after each iteration </li> <li> Forgetting - you can simluate a brain injury or simply time, where the habit had not been practiced and the memories/abilites faded </li> <li> Streak - you can simulate that after a number of solely good or bad instances, additional balls of the same colour be added to the urn </li> </h3> <h3> In the end, there is a quiz where you can test what you have learned.</h3></ul> " ) ), ############## PAGE 1 / TAB 2 - INTrO TO POLYAS URN WITH GRAPH tabPanel("The basic model", # title of the tab HTML( # main title of the page "<h1 align ='center'><b> The basic Polya's urn model </b></h3><br><br><br>"), fluidRow( # fluidRow encompasses the various boxes the site is made up of box( # this box contains the information necessary to understand the functionality of this page HTML( # the text includes the user guide for the input and some examples regarding the theory (how habits develop) "<h4> In the basic Polya's urn model you can choose how many white and black balls (or rather good and bad displays of a habit) you would like to start out with. Starting with 1 ball of each, will yield the most obvious results, since putting new balls into the urn will have a bigger impact on the proportion of white and black balls, than if you start with e.g. 100 of each. </h4> <h4> Furthermore, you can also choose how many iterations you want. Remember, each iteration equals to a.) drawing a ball at random from the urn, b.) putting it back together with an additional ball of the same colour. Thus, having a higher number of iterations means that more balls will be drawn and added back to the urn. 
</h4> <h4> You can also determine the number of runs (or trials), which corresponds to how often you would like to see the scenario play out. With fewer runs, you can see the iterations that each one goes through better. Conversely, with many runs, you can see how the probabilities work and which direction the proportion will tend to go to.</h4> <h4> Try experimenting with changing the proportions and see what happens when you start with e.g. 10 balls of each color, or if you have twice as many black balls compared to white balls, etc. Also think about how this corresponds to habit formation. What if, for example, a child has a bad habit (i.e. biting nails) that he has been doing for a long time and has thus many times displayed the bad habit? </h4> <br><h4> The range for all input variables is the following: <ul><li> The starting number of black and white balls: 1-500 </li> <li>The number of iterations: 1-500</li><li> The number of runs: 1-100</li></ul></h4> ") ), box( # this box contains the user input fields # title of the box title = "Select the input for the urn", numericInput(inputId = "BB1", label = "Number of black balls", 1, min = 1, max = 500), # input for the number of white balls numericInput(inputId = "WB1", label= "Number of white balls", 1, min = 1, max = 500), # input for the number of iterations numericInput(inputId = "nIt1", label= "Number of iterations", 50, min = 5, max = 500), # input for the number of runs/trials numericInput(inputId = "nRun1", label= "Number of runs", 10, min = 1, max = 50) ) ), ## the plot - reactive & is determined by the user input from the box above plotOutput("plot1", height = 600, width = 1810) ) ) ), ##################### TAB 2 - FEEDBACK tabItem(tabName = "m2", # title of the page HTML( "<h1 align ='center'><b> Polya's urn model and the power of feedback </b></h3><br><br><br>"), # fluidRow here too contains the boxes that make up the page (except for the plot, which comes after fluidRow) fluidRow( box( # this box 
contains the description of the feedback, with examples and explains how to handle the input HTML( "<h4> Additionally to the functionality you saw previously (number of balls to start with, number of iterations and runs), here you have an additional feature, namely 'feedback'. In this tab you can determine how many white and black balls should be put to the urn, when one got drawn. </h4> <h4> The standard is 1, but you can try out what happens when starting out with e.g. 1 ball of each color, but you put back 10 white balls and only 1 black ball when a ball of that color gets chosen. </h4> <h4> Translating this to our example, habits, you can imagine that a white ball would represent positive reward as a result of the good behaviour and the black ball would then represent a negative reward. </h4> <h4> Try to see how many white balls you would need to add to the urn after a ball gets drawn, when you start with twice as many black balls. Also try, how the initial number of balls influences this. For example, does starting with 4 black and 2 balls vs 200 black and 100 white balls make a difference when you can just double the number of white balls for the feedback (or increase tenfold)? </h4> <br><h4> The range for all input variables is the following: <ul><li> The starting number of black and white balls: 1-500 </li> <li>The number of iterations: 1-500</li><li> The number of runs: 1-100</li><li> The amount of balls for the feedback (for both colours): 1-20. 
</li></ul></h4> ") ), box( # this box contains the input for the initial urn and also for the feedback # title of the box title = "Select the input for the urn", # input for the number of black balls numericInput(inputId = "BB2", label = "Number of black balls", 1, min = 1, max = 500), # input for the number of white balls numericInput(inputId = "WB2", label= "Number of white balls", 1, min = 1, max = 500), # input for the number of iterations numericInput(inputId = "nIt2", label= "Number of iterations", 50, min = 5, max = 500), # input for the number of runs/trials numericInput(inputId = "nRun2", label= "Number of runs", 10, min = 1, max = 100), # slider for the number of BBs as feedback numericInput(inputId = "BB_feedback2", label = "Feedback for black balls", 1, min = 1, max = 20), # slider for the number of WBs as feedback numericInput(inputId = "WB_feedback2", label = "Feedback for white balls", 1, min = 1, max = 20) ) ), # the code for the plot plotOutput("plot2", height = 600, width = 1810) ), ######################## TAB 3 - FORGETTING tabItem(tabName = "m3", # title of the page HTML( "<h1 align ='center'><b> The effect of forgetting on the urn </b></h3><br><br><br>"), # fluidRow contains the boxes that make up the page (except for the plot, which comes after fluidRow) fluidRow( box( # this box contains the info text for the user HTML( "<h4> In this tab you can determine after how many iterations forgetting should take place.</h4> <br><h4> Be careful that you consider how many iterations you want to have and choose a number that is less than that. Similarly, when determining how many instances of behaviour (i.e. balls) you want our hypothetical person to 'forget', choose a number that is less than the total number of balls in the urn at that time. </h4> <h4> Translating this to our example, habits, you can imagine how a head trauma could affect one's strive to eliminate (or at least limit) one's proportion of good habit displayals. 
Try different scenarios where you see how the timing of forgetting and what proportion of balls are eliminated influence the graph. </h4> <br><h4> The range for all input variables is the following: <ul><li> The starting number of black and white balls: 1-500 </li> <li>The number of iterations: 1 to 500</li><li> The number of runs: 1-100</li><li> The number of iterations after which forgetting can take place: 5-400.</li> <li>The number of balls that can be 'forgotten': 5-400.</li></ul></h4> <br><h4><b> Pay attention that the iteration after which forgetting takes places is still within the range of iterations you chose for the urn.</b></h4><br> ") ), box(# this box contains the input for the initial urn and also for the forgetting functionality # title of the box title = "Select the input for the urn", # input for the number of black balls numericInput(inputId = "BB3", label = "Number of black balls", 1, min = 1, max = 500), # input for the number of white balls numericInput(inputId = "WB3", label= "Number of white balls", 1, min = 1, max = 500), # input for the number of iterations numericInput(inputId = "nIt3", label= "Number of iterations", 50, min = 5, max = 500), # input for the number of runs numericInput(inputId = "nRun3", label= "Number of runs", 10, min = 1, max = 50), # slider for the number of BBs as feedback numericInput(inputId = "forget_when3", label = "After how many iterations should forgetting take place?", 20, min = 5, max = 400), # slider for the number of WBs as feedback numericInput(inputId = "forget_howmany3", label = "How many instances to forget?", 10, min = 5, max = 400) ) ), # the plot plotOutput("plot3", height = 500, width = 1810) ), #################### TAB 4 - STREAK tabItem(tabName = "m4", # title of the page HTML( "<h1 align ='center'><b> The effect of a streak </b></h3><br><br><br>"), # fluidRow contains the boxes that make up the page (except for the plot, which comes after fluidRow) fluidRow( box( # this part contains the 
information text for the user HTML( "<h4> In this tab you can determine after how many iterations forgetting should take place.</h4> <br><h4> Consider how many balls of each color you have in the urn. What effect would having much more white balls and a high number of balls as a reward for a streak mean? </h4> <br> <h4> Thinking about habits in this way, one can imagine that after many succesful times that you managed to fight a bad habit, you get an extra boost. Alternatively, after having displayed a bit habit ten times in a row, one might feel like giving up fighting it. </h4> <br><h4> The range for all input variables is the following: <ul><li> The starting number of black and white balls: 1-500 </li> <li>The number of iterations: 1-500</li><li> The number of runs: 1-100</li><li> The number of balls that need to be picked of the same color that should constitute a streak: 4-50 </li> <li>The number of balls that are added to the urn after a streak: 5-100</li></ul></h4> ") ), box( # this box contains the input for the initial urn and also for the streak title = "Select the input for the urn", numericInput(inputId = "BB4", label = "Number of black balls", 1, min = 1, max = 500), # input for the number of white balls numericInput(inputId = "WB4", label= "Number of white balls", 1, min = 1, max = 500), # input for the number of iterations numericInput(inputId = "nIt4", label= "Number of iterations", 50, min = 5, max = 500), # input for the number of runs/trials numericInput(inputId = "nRun4", label= "Number of runs", 10, min = 1, max = 100), # input for when the streak should happen numericInput(inputId = "streak_when4", label = "When should the streak happen?", 10, min = 4, max = 50), # input for the number of balls that should be added to the urn when the streak happens numericInput(inputId = "streak_howmany4", label= "How many balls to add when the streak happens?", 5, min = 5, max = 100) ) ), # the plot plotOutput("plot4", height=500, width= 1810) ), 
################## TAB 5 - QUIZZES tabItem(tabName = "m5", fluidRow( HTML( # title of the page "<h1 align ='center'><b> Quiz </b></h3><br><br><br>"), box(status = "primary", title = "Answer these questions to see what you've learned", # the blue header of the quiz box with title solidHeader = TRUE, width = 8, ################## QUESTION 1 fluidRow( column(6, # the radioButton contains the id of the question, the question iteself and the anser choices, including 3 choices and a 'null' answer radioButtons("q1", "How can you increase the proportion of white balls?", c("Start with more white balls" = "a", "Put more black balls back after one had been drawn" = "b", "Forgetting should take place after half the iterations" = "c", "Select one" = "null"), selected = "null" ), uiOutput("r1") ), ################## QUESTION 2 column(6, # the radioButton contains the id of the question, the question iteself and the anser choices, including 3 choices and a 'null' answer radioButtons("q2", "How would you increase the proportion of black balls?", c("Start with 2 white balls and one black ball but put 5 black balls and only 1 black ball as feedback" = "a", " Alter how many balls to add when a streak happens" = "b", "Forgetting should only take place for white balls" = "c", "Select one" = "null"), selected = "null" ), uiOutput("r2") ) ), ################## QUESTION 3 fluidRow( column(6, # the radioButton contains the id of the question, the question iteself and the anser choices, including 3 choices and a 'null' answer radioButtons("q3", "When does forgetting have a (relatively) high impact?", c("If it happens early in the trial" = "a", "When you get lucky and only balls of one colour get forgotten" = "b", "If most balls up to that point are 'forgotten'" = "c", "Select one" = "null"), selected = "null" ), uiOutput("r3") ), ################## QUESTION 4 column(6, # the radioButton contains the id of the question, the question iteself and the anser choices, including 3 choices and a 
'null' answer radioButtons("q4", " When is the proportion of the two colours relatively stable ", c("When you start with few balls of each" = "a", "When the urn is composed of approximately the same number of balls" = "b", "At nighttime" = "c", "Select one" = "null"), selected = "null" ), uiOutput("r4") ) ) ), # this is the code for the 'box' on the right side of the page containing the 3 images showing the statistics of the answers box(status = "primary", title = "Your results", verticalLayout( infoBoxOutput("attemptBox", width = 14), # the attempt box infoBoxOutput("solvedBox", width = 14), # the '% solved' box infoBoxOutput("correctBox", width = 14) # the '% correct' box ), # the box's measures width = 4, height = 390 ) ) ) ) ) ) ####################################################################################### ############################ START OF THE SERVER ###################################### ####################################################################################### server <- function(input, output,session) { # SIDEBAR items, including the name of each page (as displayed in the sidebar), its id and icon output$menu <- renderMenu({ sidebarMenu( # introduction sidebar menuItem("Introduction", tabName="m1", icon = icon("home")), # feedback sidebar menuItem("Feedback", tabName="m2", icon = icon("envelope")), # forgetting sidebar menuItem("Forgetting", tabName="m3", icon = icon("users")), # streak sidebar menuItem("Streak", tabName="m4", icon = icon("line-chart")), # quiz sidebar menuItem("Quiz", tabName="m5", icon = icon("check")) ) }) # isolate({updateTabItems(session, "tabs", "m4")}) # if you want the dashboard to open with a specific tab number (ohter than the first) ####### PLOT 1 - the basic output$plot1 <- renderPlot ({ #### the actual code that simulates the urn source("plotfun.R") # this is necessary since the function is in this R file and this line copies the function into this file # since we are calling a function from another 
file, it is better to put all the user inputted data into variables in this R file, because you can't do that in the function call, as it doesn't recognise 'input$' # need to name these variables separately for each plot so that i.e. WB in the first dashboard won't affect WB in the 2nd nBB1 <- input$BB1 nWB1 <- input$WB1 nIt1 <- input$nIt1 nRun1 <- input$nRun1 # calling the function basicPlot from plotfun.R with all the inputs --- M is a matrix that contains the datapoints from which the plot will be made M <- basicPlot(nBB1, nWB1, nIt1, nRun1) # make the data ready for visualisation df <- as.data.frame(M) #transform matrix into a data frame df$id = 1:nrow(df) # name the rows final_data <- melt(df, id='id') # putting the data into long format names(final_data) <- c('id', 'Runs', 'value') # naming the columns so they can be used in the ggplot # the actual code for the ggplot g <- ggplot() + geom_line(data = final_data, aes(x = id, y = value, color = Runs, group = Runs), size = 1) # the title, subtitle for the plot g <- g + labs(title="Polya's Urn Model Graph", subtitle="Showing the proportion of white balls in the urn", y="Proportion of white balls", x="Iterations", caption="") # PLOT it plot(g) }) ##### PLOT 2 output$plot2 <- renderPlot ({ #### the actual code that simulates the urn source("feedbackPlot.R") # this is necessary since the function is in this R file and this line copies the function into this file # put all the user-inputted data into fixed variables for each input nBB2 <- input$BB2 # need to name these variables separately so that i.e. 
WB in the first dashboard won't affect WB in the 2nd nWB2 <- input$WB2 nIt2 <- input$nIt2 nRun2 <- input$nRun2 BB_feedback2 <- input$BB_feedback2 WB_feedback2 <- input$WB_feedback2 # calling the function feedbackPlot from feedbackPlot.R with all the inputs --- M is a matrix that contains the datapoints from which the plot will be made M <- feedbackPlot(nBB2, nWB2, nIt2, nRun2, BB_feedback2, WB_feedback2) # make the data ready for visualisation df <- as.data.frame(M) #transform matrix into a data frame df$id = 1:nrow(df) # name the rows final_data <- melt(df, id='id') # putting the data into long format names(final_data) <- c('id', 'Runs', 'value') # naming the columns so they can be used in the ggplot # the actual code for the ggplot g <- ggplot() + geom_line(data = final_data, aes(x = id, y = value, color = Runs, group = Runs), size = 1) # the title, subtitle for the plot g <- g + labs(title="Polya's Urn Model Graph", subtitle="Showing the proportion of white balls in the urn", y="Proportion of white balls", x="Iterations", caption="") # PLOT it plot(g) }) ##### PLOT 3 - forgetting output$plot3 <- renderPlot ({ source("forgettingPlot.R") # this is necessary since the function is in this R file and this line copies the function into this file # put all the user-inputted data into fixed variables for each input nBB3 <- input$BB3 # need to name these variables separately so that i.e. 
WB in the first dashboard won't affect WB in the 2nd nWB3 <- input$WB3 nIt3 <- input$nIt3 nRun3 <- input$nRun3 forget_when3 <- input$forget_when3 forget_howmany3 <- input$forget_howmany3 # calling the function forgettingPlot from forgettingPlot.R with all the inputs --- M is a matrix that contains the datapoints from which the plot will be made M <- forgettingPlot(nBB3, nWB3, nIt3, nRun3, forget_when3, forget_howmany3) # make the data ready for visualisation df <- as.data.frame(M) #transform matrix into a data frame df$id = 1:nrow(df) # name the rows final_data <- melt(df, id='id') # putting the data into long format names(final_data) <- c('id', 'Runs', 'value') # naming the columns so they can be used in the ggplot # the actual code for the ggplot g <- ggplot() + geom_line(data = final_data, aes(x = id, y = value, color = Runs, group = Runs), size = 1) # the title, subtitle for the plot g <- g + labs(title="Polya's Urn Model with Option to Suffer a Trauma", subtitle="Showing the proportion of white balls in the urn", y="Proportion of white balls", x="Iterations", caption="") # PLOT it plot(g) }) #### PLOT 4 - STREAK output$plot4 <- renderPlot ({ source("streakPlot.R") # this is necessary since the function is in this R file and this line copies the function into this file # put all the user-inputted data into fixed variables for each input nBB4 <- input$BB4 # need to name these variables separately so that i.e. 
WB in the first dashboard won't affect WB in the 2nd nWB4 <- input$WB4 nIt4 <- input$nIt4 nRun4 <- input$nRun4 streak_when4 <- input$streak_when4 streak_howmany4 <- input$streak_howmany4 # calling the function streakPlot from streakPlot.R with all the inputs --- M is a matrix that contains the datapoints from which the plot will be made M <- streakPlot(nBB4, nWB4, nIt4, nRun4,streak_when4, streak_howmany4) # make the data ready for visualisation df <- as.data.frame(M) #transform matrix into a data frame df$id = 1:nrow(df) # name the rows final_data <- melt(df, id='id') # putting the data into long format names(final_data) <- c('id', 'Runs', 'value') # naming the columns so they can be used in the ggplot # the actual code for the ggplot g <- ggplot() + geom_line(data = final_data, aes(x = id, y = value, color = Runs, group = Runs), size = 1) # the title, subtitle for the plot g <- g + labs(title="Polya's Urn Model with Option to Suffer a Trauma", subtitle="Showing the proportion of white balls in the urn", y="Proportion of white balls", x="Iterations", caption="") # PLOT it plot(g) }) ################ QUIZ CODE #################### pro <- reactiveValues(data = numeric(4), work = numeric(4)) ## pro is needed to update the statistics boxes on the right ####### QUESTION 1 output$r1 <- renderText({ # if the answer is the correct one (in this case a) if(input$q1 == "a"){ # update solved and attempted boxes pro$data[1] <- 1 pro$work[1] <- 1 # display that it is correct HTML("<h5 style = 'color:green' align = 'left'><b>Correct!</b></h5>") # if nothing is selected, the dot is in the 4th option 'select one' } else if(input$q1 == "null"){ pro$work[1] <- 0 HTML("<br>") # if a wrong answer is selected (so basically not null or the correct one) then only the attempt part gets updated } else { # the solved box doesnt, but the attempt box does get updated pro$data[1] <- 0 pro$work[1] <- 1 # display that it is the incorrect choice HTML("<h5 style ='color:red' align='left'><b>Wrong! 
Try again!</b></h5>") } }) ####### QUESTION 2 output$r2 <- renderText({ # if the answer is the correct one (in this case a) if(input$q2 == "a"){ pro$data[2] <- 1 pro$work[2] <- 1 HTML("<h5 style = 'color:green' align = 'left'><b>Correct!</b></h5>") # if nothing is selected, the dot is in the 4th option 'select one' (this is the baseline) } else if(input$q2 == "null"){ pro$work[2] <- 0 HTML("<br>") # if a wrong answer is selected (so basically not null or the correct one) then only the attempt part gets updated } else { # the solved box doesnt, but the attempt box does get updated pro$data[2] <- 0 pro$work[2] <- 1 # display that it is the incorrect choice HTML("<h5 style ='color:red' align='left'><b>Wrong! Try again!</b></h5>") } }) ####### QUESTION 3 output$r3 <- renderText({ # if the answer is the correct one (in this case c) if(input$q3 == "c"){ pro$data[3] <- 1 pro$work[3] <- 1 HTML("<h5 style = 'color:green' align = 'left'><b>Correct!</b></h5>") # if nothing is selected, the dot is in the 4th option 'select one' (this is the baseline) } else if(input$q3 == "null"){ # none selected pro$work[3] <- 0 HTML("<br>") # if a wrong answer is selected (so basically not null or the correct one) then only the attempt part gets updated } else { # the solved box doesnt, but the attempt box does get updated pro$data[3] <- 0 pro$work[3] <- 1 # display that it is the incorrect choice HTML("<h5 style ='color:red' align='left'><b>Wrong! 
Try again!</b></h5>") } }) ####### QUESTION 4 output$r4 <- renderText({ # if the answer is the correct one (in this case b) if(input$q4 == "b"){ pro$data[4] <- 1 pro$work[4] <- 1 HTML("<h5 style = 'color:green' align = 'left'><b>Correct!</b></h5>") # if nothing is selected, the dot is in the 4th option 'select one' (this is the baseline) } else if(input$q4 == "null") { pro$work[4] <- 0 HTML("<br>") # if a wrong answer is selected (so basically not null or the correct one) then only the attempt part gets updated } else { # the solved box doesnt, but the attempt box does get updated pro$data[4] <- 0 pro$work[4] <- 1 # display that it is the incorrect choice HTML("<h5 style ='color:red' align='left'><b>Wrong! Try again!</b></h5>") } }) ############## BOXOUTPUT (on the quiz page, provides info about having completed the quizzes and how many questions one got right) ### questions attempted output$attemptBox <- renderInfoBox({ # title AND what to display infoBox("", "Attempted", paste(round(100*sum(pro$work)/4, 0), "%"), # icon icon = icon("pencil", lib = "glyphicon"), # color color = "orange" ) }) ### questions answered correctly output$solvedBox <- renderInfoBox({ # title AND what to display infoBox("", "Solved", paste(round(100*sum(pro$data)/4, 0), "%"), # icon icon = icon("check", lib = "glyphicon"), # color color = "green" ) }) ### percentage of correct answers output$correctBox <- renderInfoBox({ # title AND what to display if there's no numeric input if(sum(pro$data)/sum(pro$work) == "NaN"){ # what gets displayed infoBox("", "Percentage correct", paste("NA"), # icon icon = icon("thumbs-up", lib = "glyphicon"), # color color = "blue" ) } else { # title AND what to display when the value is not 0 anymore infoBox("", "Percentage correct", paste(round(sum(pro$data)/sum(pro$work)*100), "%"), # icon icon = icon("thumbs-up", lib = "glyphicon"), # color color = "blue" ) } }) } shinyApp(ui, server) ######################################## THE END OF THE CODE 
###################################################
############################################################################## ### ### MST ### ### Plot Random Main ### ############################################################################## ### Copyright (c) 2015-2018, The TRONCO Team (www.troncopackage.org) ### email: tronco@disco.unimib.it ### All rights reserved. This program and the accompanying materials ### are made available under the terms of the GNU GPL v3.0 ### which accompanies this distribution ############################################################################## library(ggplot2) library(Rmisc) samples = c(5, 7, 10, 20, 50) epos = c(0.000, 0.050, 0.100, 0.150, 0.200) eneg = c(0.000, 0.050, 0.100, 0.150, 0.200) source('../giulio.plot.R') e = new.env() for (type in c('accuracy', 'hamming_distance', 'specificity', 'sensitivity')) { for (branching in c('random_forest')) { load(paste0('RData/results.values.', type, '.', branching, '.RData'), envir = e) results.values = e$results.values load(paste0('RData/results.', type, '.', branching, '.RData'), envir = e) results = e$results ## CAPRI CAPRESE plotlist = list() plotlist.median = list() plot.id = 1 cat('capri caprese', branching, '\n') for (sample in samples) { p = dotplotter(results.values, sample, c('capri_bic', 'capri_aic', 'capri_loglik', 'caprese_no.reg', 'scite_no.reg'), branching, type, paste('SAMPLE SIZE = ', sample)) plotlist[[plot.id]] = p m = medianplotter(results, sample, c('capri_bic', 'capri_aic', 'capri_loglik', 'caprese_no.reg'), branching, paste('SAMPLE SIZE = ', sample)) plotlist.median[[plot.id]] = m plot.id = plot.id + 1 } pdf(paste('plot/capri_caprese', type, branching, '.pdf', sep = '_'), height = 14, width = 11) multiplot(plotlist = plotlist) dev.off() ##pdf(paste('plot/capri_caprese', branching, '_median.pdf', sep = '_'), height = 14, width = 11) ##multiplot(plotlist = plotlist.median) ##dev.off() ## GABOW plotlist = list() plotlist.median = list() plot.id = 1 for (sample in samples) { p = dotplotter(results.values, 
sample, c('gabow_entropy.no.reg', 'gabow_pmi.no.reg', 'gabow_cpmi.no.reg', 'gabow_mi.no.reg', 'scite_no.reg'), branching, type, paste('SAMPLE SIZE = ', sample)) plotlist[[plot.id]] = p m = medianplotter(results, sample, c('gabow_entropy.no.reg', 'gabow_pmi.no.reg', 'gabow_cpmi.no.reg', 'gabow_mi.no.reg', 'gabow_no_rising_no.raising.entropy.no.reg', 'gabow_no_rising_no.raising.pmi.no.reg', 'gabow_no_rising_no.raising.cpmi.no.reg', 'gabow_no_rising_no.raising.mi.no.reg'), branching, paste('SAMPLE SIZE = ', sample)) plotlist.median[[plot.id]] = m plot.id = plot.id + 1 } pdf(paste('plot/gabow', type, branching, '.pdf', sep = '_'), height = 14, width = 11) multiplot(plotlist = plotlist) dev.off() ##pdf(paste('plot/gabow', branching, '_median.pdf', sep = '_'), height = 14, width = 11) ##multiplot(plotlist = plotlist.median) ##dev.off() ## EDMONDS plotlist = list() plotlist.median = list() plot.id = 1 for (sample in samples) { p = dotplotter(results.values, sample, c('edmonds_entropy.no.reg', 'edmonds_pmi.no.reg', 'edmonds_cpmi.no.reg', 'scite_no.reg'), branching, type, paste('SAMPLE SIZE = ', sample)) plotlist[[plot.id]] = p m = medianplotter(results, sample, c('edmonds_entropy.no.reg', 'edmonds_pmi.no.reg', 'edmonds_cpmi.no.reg'), branching, paste('SAMPLE SIZE = ', sample)) plotlist.median[[plot.id]] = m plot.id = plot.id + 1 } pdf(paste('plot/edmonds', type, branching, '.pdf', sep = '_'), height = 14, width = 11) multiplot(plotlist = plotlist) dev.off() ##pdf(paste('plot/edmonds', branching, '_median.pdf', sep = '_'), height = 14, width = 11) ##multiplot(plotlist = plotlist.median) ##dev.off() ## PRIM CHOW LIU plotlist = list() plotlist.median = list() plot.id = 1 for (sample in samples) { p = dotplotter(results.values, sample, c('chowliu_loglik', 'prim_no.reg', 'scite_no.reg'), branching, type, paste('SAMPLE SIZE = ', sample)) plotlist[[plot.id]] = p m = medianplotter(results, sample, c('chowliu_loglik', 'prim_no.reg', 'scite_no.reg'), branching, paste('SAMPLE SIZE = 
', sample)) plotlist.median[[plot.id]] = m plot.id = plot.id + 1 } pdf(paste('plot/prim_chowliu', type, branching, '.pdf', sep = '_'), height = 14, width = 11) multiplot(plotlist = plotlist) dev.off() ##pdf(paste('plot/prim_chowliu', branching, '_median.pdf', sep = '_'), height = 14, width = 11) ##multiplot(plotlist = plotlist.median) ##dev.off() ## ALL plotlist = list() plotlist.median = list() plot.id = 1 for (sample in samples) { p = dotplotter(results.values, sample, c('capri_bic', 'capri_aic', 'caprese_no.reg', 'edmonds_entropy.no.reg', 'edmonds_pmi.no.reg', 'edmonds_cpmi.no.reg', 'gabow_entropy.no.reg', 'gabow_pmi.no.reg', 'gabow_cpmi.no.reg', 'gabow_mi.no.reg', 'chowliu_loglik', 'prim_no.reg', 'scite_no.reg'), branching, type, paste('SAMPLE SIZE = ', sample)) plotlist[[plot.id]] = p m = medianplotter(results, sample, c('edmonds_entropy.no.reg', 'edmonds_pmi.no.reg', 'edmonds_cpmi.no.reg'), branching, paste('SAMPLE SIZE = ', sample)) plotlist.median[[plot.id]] = m plot.id = plot.id + 1 } pdf(paste('plot/all', type, branching, '.pdf', sep = '_'), height = 18, width = 22) multiplot(plotlist = plotlist) dev.off() } } ### end of file -- plot.random.main.R
/experiment_6_multiple/plot.random.main.R
no_license
BIMIB-DISCo/MST
R
false
false
8,821
r
############################################################################## ### ### MST ### ### Plot Random Main ### ############################################################################## ### Copyright (c) 2015-2018, The TRONCO Team (www.troncopackage.org) ### email: tronco@disco.unimib.it ### All rights reserved. This program and the accompanying materials ### are made available under the terms of the GNU GPL v3.0 ### which accompanies this distribution ############################################################################## library(ggplot2) library(Rmisc) samples = c(5, 7, 10, 20, 50) epos = c(0.000, 0.050, 0.100, 0.150, 0.200) eneg = c(0.000, 0.050, 0.100, 0.150, 0.200) source('../giulio.plot.R') e = new.env() for (type in c('accuracy', 'hamming_distance', 'specificity', 'sensitivity')) { for (branching in c('random_forest')) { load(paste0('RData/results.values.', type, '.', branching, '.RData'), envir = e) results.values = e$results.values load(paste0('RData/results.', type, '.', branching, '.RData'), envir = e) results = e$results ## CAPRI CAPRESE plotlist = list() plotlist.median = list() plot.id = 1 cat('capri caprese', branching, '\n') for (sample in samples) { p = dotplotter(results.values, sample, c('capri_bic', 'capri_aic', 'capri_loglik', 'caprese_no.reg', 'scite_no.reg'), branching, type, paste('SAMPLE SIZE = ', sample)) plotlist[[plot.id]] = p m = medianplotter(results, sample, c('capri_bic', 'capri_aic', 'capri_loglik', 'caprese_no.reg'), branching, paste('SAMPLE SIZE = ', sample)) plotlist.median[[plot.id]] = m plot.id = plot.id + 1 } pdf(paste('plot/capri_caprese', type, branching, '.pdf', sep = '_'), height = 14, width = 11) multiplot(plotlist = plotlist) dev.off() ##pdf(paste('plot/capri_caprese', branching, '_median.pdf', sep = '_'), height = 14, width = 11) ##multiplot(plotlist = plotlist.median) ##dev.off() ## GABOW plotlist = list() plotlist.median = list() plot.id = 1 for (sample in samples) { p = dotplotter(results.values, 
sample, c('gabow_entropy.no.reg', 'gabow_pmi.no.reg', 'gabow_cpmi.no.reg', 'gabow_mi.no.reg', 'scite_no.reg'), branching, type, paste('SAMPLE SIZE = ', sample)) plotlist[[plot.id]] = p m = medianplotter(results, sample, c('gabow_entropy.no.reg', 'gabow_pmi.no.reg', 'gabow_cpmi.no.reg', 'gabow_mi.no.reg', 'gabow_no_rising_no.raising.entropy.no.reg', 'gabow_no_rising_no.raising.pmi.no.reg', 'gabow_no_rising_no.raising.cpmi.no.reg', 'gabow_no_rising_no.raising.mi.no.reg'), branching, paste('SAMPLE SIZE = ', sample)) plotlist.median[[plot.id]] = m plot.id = plot.id + 1 } pdf(paste('plot/gabow', type, branching, '.pdf', sep = '_'), height = 14, width = 11) multiplot(plotlist = plotlist) dev.off() ##pdf(paste('plot/gabow', branching, '_median.pdf', sep = '_'), height = 14, width = 11) ##multiplot(plotlist = plotlist.median) ##dev.off() ## EDMONDS plotlist = list() plotlist.median = list() plot.id = 1 for (sample in samples) { p = dotplotter(results.values, sample, c('edmonds_entropy.no.reg', 'edmonds_pmi.no.reg', 'edmonds_cpmi.no.reg', 'scite_no.reg'), branching, type, paste('SAMPLE SIZE = ', sample)) plotlist[[plot.id]] = p m = medianplotter(results, sample, c('edmonds_entropy.no.reg', 'edmonds_pmi.no.reg', 'edmonds_cpmi.no.reg'), branching, paste('SAMPLE SIZE = ', sample)) plotlist.median[[plot.id]] = m plot.id = plot.id + 1 } pdf(paste('plot/edmonds', type, branching, '.pdf', sep = '_'), height = 14, width = 11) multiplot(plotlist = plotlist) dev.off() ##pdf(paste('plot/edmonds', branching, '_median.pdf', sep = '_'), height = 14, width = 11) ##multiplot(plotlist = plotlist.median) ##dev.off() ## PRIM CHOW LIU plotlist = list() plotlist.median = list() plot.id = 1 for (sample in samples) { p = dotplotter(results.values, sample, c('chowliu_loglik', 'prim_no.reg', 'scite_no.reg'), branching, type, paste('SAMPLE SIZE = ', sample)) plotlist[[plot.id]] = p m = medianplotter(results, sample, c('chowliu_loglik', 'prim_no.reg', 'scite_no.reg'), branching, paste('SAMPLE SIZE = 
', sample)) plotlist.median[[plot.id]] = m plot.id = plot.id + 1 } pdf(paste('plot/prim_chowliu', type, branching, '.pdf', sep = '_'), height = 14, width = 11) multiplot(plotlist = plotlist) dev.off() ##pdf(paste('plot/prim_chowliu', branching, '_median.pdf', sep = '_'), height = 14, width = 11) ##multiplot(plotlist = plotlist.median) ##dev.off() ## ALL plotlist = list() plotlist.median = list() plot.id = 1 for (sample in samples) { p = dotplotter(results.values, sample, c('capri_bic', 'capri_aic', 'caprese_no.reg', 'edmonds_entropy.no.reg', 'edmonds_pmi.no.reg', 'edmonds_cpmi.no.reg', 'gabow_entropy.no.reg', 'gabow_pmi.no.reg', 'gabow_cpmi.no.reg', 'gabow_mi.no.reg', 'chowliu_loglik', 'prim_no.reg', 'scite_no.reg'), branching, type, paste('SAMPLE SIZE = ', sample)) plotlist[[plot.id]] = p m = medianplotter(results, sample, c('edmonds_entropy.no.reg', 'edmonds_pmi.no.reg', 'edmonds_cpmi.no.reg'), branching, paste('SAMPLE SIZE = ', sample)) plotlist.median[[plot.id]] = m plot.id = plot.id + 1 } pdf(paste('plot/all', type, branching, '.pdf', sep = '_'), height = 18, width = 22) multiplot(plotlist = plotlist) dev.off() } } ### end of file -- plot.random.main.R
## load required packages if(!require('xlsx')) {install.packages('xlsx')} library('xlsx') if(!require('ggplot2')) {install.packages('ggplot2')} library('ggplot2') ## import sales data my_dir <- "C:/Users/Marcus/Documents/R/Regression" df_data <- read.xlsx(paste0(my_dir, "/data/toy_sales_data.xlsx"), sheetName = "data") ## plot of Sales vs TV and Digital Investments ggplot(df_data, aes(x = tv_spend, y = sales)) + geom_point(aes(x = tv_spend, colour = "tv_spend")) + geom_point(aes(x = digital_spend, colour = "digital_spend")) + theme_minimal() + scale_colour_manual(values = c("darkorange", "dodgerblue"), name = "Investment Type", labels = c("Digital Investment", "TV Investment")) + xlab("Digital/TV Investment") + ylab("Sales") + ggtitle("Sales vs Digital & TV Investments") + theme(plot.title = element_text(hjust=0.4), legend.position = "bottom") ggsave(paste0(my_dir, "/sales_vs_investments.png")) ## correlations among Sales, TV and Digital Investments cor(df_data[, c("sales", "tv_spend", "digital_spend")]) # sales tv_spend digital_spend # sales 1.0000000 0.4406862 0.6647654 # tv_spend 0.4406862 1.0000000 0.0720594 # digital_spend 0.6647654 0.0720594 1.0000000 ## fit regression model on the data fit <- lm(sales ~ tv_spend + digital_spend, data = df_data) ## adjusted R-squared summary(fit)$adj.r.squared # [1] 0.5586161 plot(df_data$trend, fit$residuals) ## p-value and significance of each regressor summary(fit)$coefficients # Estimate Std. 
Error t value Pr(>|t|) # (Intercept) 6.767146e+06 9.685341e+05 6.986998 6.717198e-07 # tv_spend 1.731542e+00 6.091071e-01 2.842755 9.746777e-03 # digital_spend 4.469165e+00 9.755057e-01 4.581383 1.619200e-04 ## calculate the contribution from TV Spend to Sales in percentage and absolute dollar value tv_coef <- unname(fit$coefficients)[2] tv_contribution <- sum(df_data$tv_spend) * tv_coef tv_contribution # [1] 44108224 tv_contribution_percentage <- tv_contribution / sum(df_data$sales) * 100 tv_contribution_percentage # [1] 16.60073 ## calculate the TV return on investment (ROI) tv_investment <- sum(df_data$tv_spend) tv_ROI <- (tv_contribution - tv_investment) / tv_investment tv_ROI # [1] 0.7315423 ## Using the planned spend values for the first 3 months of 2018 and your regressions model, ## calculate the expected sales value for the first 3 months of 2018 df_planned_spend <- read.xlsx(paste0(my_dir, "/data/toy_sales_data.xlsx"), sheetName = "planned_spend") base_intercept <- unname(fit$coefficients)[1] digital_coef <- unname(fit$coefficients)[3] df_planned_spend[, "predicted_sales"] <- base_intercept + df_planned_spend$tv_spend * tv_coef + df_planned_spend$digital_spend * digital_coef df_planned_spend$predicted_sales # [1] 8334056 9082486 10892394
/regression.R
no_license
tili7864/c_test
R
false
false
2,962
r
## load required packages if(!require('xlsx')) {install.packages('xlsx')} library('xlsx') if(!require('ggplot2')) {install.packages('ggplot2')} library('ggplot2') ## import sales data my_dir <- "C:/Users/Marcus/Documents/R/Regression" df_data <- read.xlsx(paste0(my_dir, "/data/toy_sales_data.xlsx"), sheetName = "data") ## plot of Sales vs TV and Digital Investments ggplot(df_data, aes(x = tv_spend, y = sales)) + geom_point(aes(x = tv_spend, colour = "tv_spend")) + geom_point(aes(x = digital_spend, colour = "digital_spend")) + theme_minimal() + scale_colour_manual(values = c("darkorange", "dodgerblue"), name = "Investment Type", labels = c("Digital Investment", "TV Investment")) + xlab("Digital/TV Investment") + ylab("Sales") + ggtitle("Sales vs Digital & TV Investments") + theme(plot.title = element_text(hjust=0.4), legend.position = "bottom") ggsave(paste0(my_dir, "/sales_vs_investments.png")) ## correlations among Sales, TV and Digital Investments cor(df_data[, c("sales", "tv_spend", "digital_spend")]) # sales tv_spend digital_spend # sales 1.0000000 0.4406862 0.6647654 # tv_spend 0.4406862 1.0000000 0.0720594 # digital_spend 0.6647654 0.0720594 1.0000000 ## fit regression model on the data fit <- lm(sales ~ tv_spend + digital_spend, data = df_data) ## adjusted R-squared summary(fit)$adj.r.squared # [1] 0.5586161 plot(df_data$trend, fit$residuals) ## p-value and significance of each regressor summary(fit)$coefficients # Estimate Std. 
Error t value Pr(>|t|) # (Intercept) 6.767146e+06 9.685341e+05 6.986998 6.717198e-07 # tv_spend 1.731542e+00 6.091071e-01 2.842755 9.746777e-03 # digital_spend 4.469165e+00 9.755057e-01 4.581383 1.619200e-04 ## calculate the contribution from TV Spend to Sales in percentage and absolute dollar value tv_coef <- unname(fit$coefficients)[2] tv_contribution <- sum(df_data$tv_spend) * tv_coef tv_contribution # [1] 44108224 tv_contribution_percentage <- tv_contribution / sum(df_data$sales) * 100 tv_contribution_percentage # [1] 16.60073 ## calculate the TV return on investment (ROI) tv_investment <- sum(df_data$tv_spend) tv_ROI <- (tv_contribution - tv_investment) / tv_investment tv_ROI # [1] 0.7315423 ## Using the planned spend values for the first 3 months of 2018 and your regressions model, ## calculate the expected sales value for the first 3 months of 2018 df_planned_spend <- read.xlsx(paste0(my_dir, "/data/toy_sales_data.xlsx"), sheetName = "planned_spend") base_intercept <- unname(fit$coefficients)[1] digital_coef <- unname(fit$coefficients)[3] df_planned_spend[, "predicted_sales"] <- base_intercept + df_planned_spend$tv_spend * tv_coef + df_planned_spend$digital_spend * digital_coef df_planned_spend$predicted_sales # [1] 8334056 9082486 10892394
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/conversion.R \name{as_adj_list} \alias{as_adj_edge_list} \alias{as_adj_list} \alias{get.adjedgelist} \alias{get.adjlist} \title{Adjacency lists} \usage{ as_adj_list(graph, mode = c("all", "out", "in", "total")) as_adj_edge_list(graph, mode = c("all", "out", "in", "total")) } \arguments{ \item{graph}{The input graph.} \item{mode}{Character scalar, it gives what kind of adjacent edges/vertices to include in the lists. \sQuote{\code{out}} is for outgoing edges/vertices, \sQuote{\code{in}} is for incoming edges/vertices, \sQuote{\code{all}} is for both. This argument is ignored for undirected graphs.} } \value{ A list of numeric vectors. } \description{ Create adjacency lists from a graph, either for adjacent edges or for neighboring vertices } \details{ \code{as_adj_list} returns a list of numeric vectors, which include the ids of neighbor vertices (according to the \code{mode} argument) of all vertices. \code{as_adj_edge_list} returns a list of numeric vectors, which include the ids of adjacent edgs (according to the \code{mode} argument) of all vertices. } \examples{ g <- make_ring(10) as_adj_list(g) as_adj_edge_list(g) } \author{ Gabor Csardi \email{csardi.gabor@gmail.com} } \seealso{ \code{\link{as_edgelist}}, \code{\link{as_adj}} } \keyword{graphs}
/man/as_adj_list.Rd
no_license
Ruchika8/Dgraph
R
false
true
1,355
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/conversion.R \name{as_adj_list} \alias{as_adj_edge_list} \alias{as_adj_list} \alias{get.adjedgelist} \alias{get.adjlist} \title{Adjacency lists} \usage{ as_adj_list(graph, mode = c("all", "out", "in", "total")) as_adj_edge_list(graph, mode = c("all", "out", "in", "total")) } \arguments{ \item{graph}{The input graph.} \item{mode}{Character scalar, it gives what kind of adjacent edges/vertices to include in the lists. \sQuote{\code{out}} is for outgoing edges/vertices, \sQuote{\code{in}} is for incoming edges/vertices, \sQuote{\code{all}} is for both. This argument is ignored for undirected graphs.} } \value{ A list of numeric vectors. } \description{ Create adjacency lists from a graph, either for adjacent edges or for neighboring vertices } \details{ \code{as_adj_list} returns a list of numeric vectors, which include the ids of neighbor vertices (according to the \code{mode} argument) of all vertices. \code{as_adj_edge_list} returns a list of numeric vectors, which include the ids of adjacent edgs (according to the \code{mode} argument) of all vertices. } \examples{ g <- make_ring(10) as_adj_list(g) as_adj_edge_list(g) } \author{ Gabor Csardi \email{csardi.gabor@gmail.com} } \seealso{ \code{\link{as_edgelist}}, \code{\link{as_adj}} } \keyword{graphs}
# downloading GBIF data library(spocc) library(mapr) library(ggplot2) library(raster) wc = getData('worldclim', var='bio', res=5, path='tmp') spdist <- occ(query='Crotalus horridus', from='gbif', limit =7500) #No info is given spdist #tiblle to get data spdist$gbif$data #look at S4 head(spdist$gbif$data) #Convert to data frame spdist_df = occ2df(spdist) map_leaflet(spdist) df = as.data.frame(occ2df(spdist$gbif)) map_leaflet(df[,c('name', 'longitude', 'latitude','locality', 'stateProvince', 'year', 'occurrenceID')]) ext = extent(-125, -55, 20, 60) wc = crop(wc, ext) #Make a data frame wc_df = as.data.frame(wc, xy=TRUE) sp_df = occ2df(spdist) ggplot() + geom_raster(data = wc_df, aes(x = x, y = y, fill = bio1/10)) + geom_point(data=sp_df, aes(x=longitude, y=latitude), col='green') + coord_quickmap() + theme_bw() + scale_fill_gradientn(colours=c('navy', 'white', 'darkred'), na.value = "black") #Exctract extr = extract(wc, sp_df[,c('longitude', 'latitude')]) head(extr) sp_ex = cbind(df[,c('name', 'longitude', 'latitude', 'stateProvince', 'year', 'occurrenceID')], extr) sp_ex = na.omit(sp_ex) head(sp_ex) ggplot() + geom_raster(data = wc_df, aes(x = x, y = y, fill = bio1/10)) + geom_point(data=sp_ex, aes(x=longitude, y=latitude), col='white', cex=0.7) + coord_quickmap() + theme_bw() + scale_fill_gradientn(colours=c('navy', 'grey90', darkred'), na.value = "black")
/R/Primary Biodiversity Data.R
no_license
stephbryson98/BIO331GeoSpatial
R
false
false
1,461
r
# downloading GBIF data library(spocc) library(mapr) library(ggplot2) library(raster) wc = getData('worldclim', var='bio', res=5, path='tmp') spdist <- occ(query='Crotalus horridus', from='gbif', limit =7500) #No info is given spdist #tiblle to get data spdist$gbif$data #look at S4 head(spdist$gbif$data) #Convert to data frame spdist_df = occ2df(spdist) map_leaflet(spdist) df = as.data.frame(occ2df(spdist$gbif)) map_leaflet(df[,c('name', 'longitude', 'latitude','locality', 'stateProvince', 'year', 'occurrenceID')]) ext = extent(-125, -55, 20, 60) wc = crop(wc, ext) #Make a data frame wc_df = as.data.frame(wc, xy=TRUE) sp_df = occ2df(spdist) ggplot() + geom_raster(data = wc_df, aes(x = x, y = y, fill = bio1/10)) + geom_point(data=sp_df, aes(x=longitude, y=latitude), col='green') + coord_quickmap() + theme_bw() + scale_fill_gradientn(colours=c('navy', 'white', 'darkred'), na.value = "black") #Exctract extr = extract(wc, sp_df[,c('longitude', 'latitude')]) head(extr) sp_ex = cbind(df[,c('name', 'longitude', 'latitude', 'stateProvince', 'year', 'occurrenceID')], extr) sp_ex = na.omit(sp_ex) head(sp_ex) ggplot() + geom_raster(data = wc_df, aes(x = x, y = y, fill = bio1/10)) + geom_point(data=sp_ex, aes(x=longitude, y=latitude), col='white', cex=0.7) + coord_quickmap() + theme_bw() + scale_fill_gradientn(colours=c('navy', 'grey90', darkred'), na.value = "black")
#setwd("~/R/coursera-data-science-getdata-course-project/rawdata") #library("microbenchmark") library("data.table") a.labels <- fread("activity_labels.txt") v.names <- fread("features.txt")[grepl("-(mean|std)\\(\\)", V2, perl = T)] v.names[, V2:=gsub("()", "", V2, fixed = T)][, V2:=gsub("-", ".", V2, fixed = T)] s <- rbind(fread("test/subject_test.txt"), fread("train/subject_train.txt")) setnames(s, names(s), c("Subject")) y <- rbind(fread("test/y_test.txt"), fread("train/y_train.txt")) y <- as.data.table(factor(y$V1, a.labels$V1, a.labels$V2, ordered = T)) setnames(y, names(y), c("Activity")) system("sed 's/ / /g' test/X_test.txt >test/X_test.txt.1") system("sed 's/ / /g' train/X_train.txt >train/X_train.txt.1") x <- rbind( fread("test/X_test.txt.1", sep = " ", select = (v.names$V1 + 1), colClasses = "numeric"), fread("train/X_train.txt.1", sep = " ", select = (v.names$V1 + 1), colClasses = "numeric") ) setnames(x, names(x), v.names$V2) data <- cbind(y, s, x) setkey(data, Activity, Subject) tidydata <- data[, lapply(.SD, mean), by = .(Activity, Subject)] write.table(tidydata, file = "tidydata.txt", row.names = F)
/run_analysis-fastest.R
no_license
ZedLeb/coursera-data-science-getdata-course-project
R
false
false
1,139
r
#setwd("~/R/coursera-data-science-getdata-course-project/rawdata") #library("microbenchmark") library("data.table") a.labels <- fread("activity_labels.txt") v.names <- fread("features.txt")[grepl("-(mean|std)\\(\\)", V2, perl = T)] v.names[, V2:=gsub("()", "", V2, fixed = T)][, V2:=gsub("-", ".", V2, fixed = T)] s <- rbind(fread("test/subject_test.txt"), fread("train/subject_train.txt")) setnames(s, names(s), c("Subject")) y <- rbind(fread("test/y_test.txt"), fread("train/y_train.txt")) y <- as.data.table(factor(y$V1, a.labels$V1, a.labels$V2, ordered = T)) setnames(y, names(y), c("Activity")) system("sed 's/ / /g' test/X_test.txt >test/X_test.txt.1") system("sed 's/ / /g' train/X_train.txt >train/X_train.txt.1") x <- rbind( fread("test/X_test.txt.1", sep = " ", select = (v.names$V1 + 1), colClasses = "numeric"), fread("train/X_train.txt.1", sep = " ", select = (v.names$V1 + 1), colClasses = "numeric") ) setnames(x, names(x), v.names$V2) data <- cbind(y, s, x) setkey(data, Activity, Subject) tidydata <- data[, lapply(.SD, mean), by = .(Activity, Subject)] write.table(tidydata, file = "tidydata.txt", row.names = F)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Sql.R \name{renderTranslateQuerySql} \alias{renderTranslateQuerySql} \title{Render, translate, and query to data.frame} \usage{ renderTranslateQuerySql( connection, sql, errorReportFile = file.path(getwd(), "errorReportSql.txt"), snakeCaseToCamelCase = FALSE, oracleTempSchema = NULL, tempEmulationSchema = getOption("sqlRenderTempEmulationSchema"), integerAsNumeric = getOption("databaseConnectorIntegerAsNumeric", default = TRUE), integer64AsNumeric = getOption("databaseConnectorInteger64AsNumeric", default = TRUE), ... ) } \arguments{ \item{connection}{The connection to the database server.} \item{sql}{The SQL to be send.} \item{errorReportFile}{The file where an error report will be written if an error occurs. Defaults to 'errorReportSql.txt' in the current working directory.} \item{snakeCaseToCamelCase}{If true, field names are assumed to use snake_case, and are converted to camelCase.} \item{oracleTempSchema}{DEPRECATED: use \code{tempEmulationSchema} instead.} \item{tempEmulationSchema}{Some database platforms like Oracle and Impala do not truly support temp tables. To emulate temp tables, provide a schema with write privileges where temp tables can be created.} \item{integerAsNumeric}{Logical: should 32-bit integers be converted to numeric (double) values? If FALSE 32-bit integers will be represented using R's native \code{Integer} class.} \item{integer64AsNumeric}{Logical: should 64-bit integers be converted to numeric (double) values? If FALSE 64-bit integers will be represented using \code{bit64::integer64}.} \item{...}{Parameters that will be used to render the SQL.} } \value{ A data frame. } \description{ This function renders, and translates SQL, sends it to the server, and returns the results as a data.frame. } \details{ This function calls the \code{render} and \code{translate} functions in the SqlRender package before calling \code{\link{querySql}}. 
} \examples{ \dontrun{ connectionDetails <- createConnectionDetails( dbms = "postgresql", server = "localhost", user = "root", password = "blah", schema = "cdm_v4" ) conn <- connect(connectionDetails) persons <- renderTranslatequerySql(conn, sql = "SELECT TOP 10 * FROM @schema.person", schema = "cdm_synpuf" ) disconnect(conn) } }
/man/renderTranslateQuerySql.Rd
permissive
ABMI/DatabaseConnector
R
false
true
2,345
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Sql.R \name{renderTranslateQuerySql} \alias{renderTranslateQuerySql} \title{Render, translate, and query to data.frame} \usage{ renderTranslateQuerySql( connection, sql, errorReportFile = file.path(getwd(), "errorReportSql.txt"), snakeCaseToCamelCase = FALSE, oracleTempSchema = NULL, tempEmulationSchema = getOption("sqlRenderTempEmulationSchema"), integerAsNumeric = getOption("databaseConnectorIntegerAsNumeric", default = TRUE), integer64AsNumeric = getOption("databaseConnectorInteger64AsNumeric", default = TRUE), ... ) } \arguments{ \item{connection}{The connection to the database server.} \item{sql}{The SQL to be send.} \item{errorReportFile}{The file where an error report will be written if an error occurs. Defaults to 'errorReportSql.txt' in the current working directory.} \item{snakeCaseToCamelCase}{If true, field names are assumed to use snake_case, and are converted to camelCase.} \item{oracleTempSchema}{DEPRECATED: use \code{tempEmulationSchema} instead.} \item{tempEmulationSchema}{Some database platforms like Oracle and Impala do not truly support temp tables. To emulate temp tables, provide a schema with write privileges where temp tables can be created.} \item{integerAsNumeric}{Logical: should 32-bit integers be converted to numeric (double) values? If FALSE 32-bit integers will be represented using R's native \code{Integer} class.} \item{integer64AsNumeric}{Logical: should 64-bit integers be converted to numeric (double) values? If FALSE 64-bit integers will be represented using \code{bit64::integer64}.} \item{...}{Parameters that will be used to render the SQL.} } \value{ A data frame. } \description{ This function renders, and translates SQL, sends it to the server, and returns the results as a data.frame. } \details{ This function calls the \code{render} and \code{translate} functions in the SqlRender package before calling \code{\link{querySql}}. 
} \examples{ \dontrun{ connectionDetails <- createConnectionDetails( dbms = "postgresql", server = "localhost", user = "root", password = "blah", schema = "cdm_v4" ) conn <- connect(connectionDetails) persons <- renderTranslatequerySql(conn, sql = "SELECT TOP 10 * FROM @schema.person", schema = "cdm_synpuf" ) disconnect(conn) } }
############################################################################### # STEP 0: SETUP ----- pkgs <- c( 'fs', 'glue', 'here', 'tidyverse', 'janitor', 'DBI', 'vroom', 'readxl', 'writexl', 'lubridate', 'tsibble', 'tsbox', 'timetk', 'xts', 'stringr', 'forcats' ) xfun::pkg_attach2(pkgs, message = FALSE) ############################################################################### # STEP 1: Price ----- con <- dbConnect( RPostgres::Postgres(), host = '192.168.4.133', user = 'postgres', password = 'smxK9T', dbname = 'db_production' ) price_oae_m_region <- dbReadTable(con, 'price_oae_m_region') price_oae_m_TH00 <- price_oae_m_region %>% filter(province_code == "TH00") %>% mutate( date = ymd(glue('{year-543}-{month}-01')), price_name = str_replace(price_name, 'น้ำยางข้น', 'น้ำยางสด') ) %>% arrange(date, commod, subcommod, price_name) price_oae_m_TH00 %>% saveRDS(here('data/price_oae_m_TH00.rds')) price_oae_m_TH00 %>% write_xlsx(here('data/price_oae_m_TH00.xlsx')) ref_price_oae <- price_oae_m_TH00 %>% count(price_name, commod, subcommod, unit) ref_price_oae %>% saveRDS(here('data/ref_price_oae.rds')) ref_price_oae %>% write_xlsx(here('data/ref_price_oae.xlsx')) ############################################################################### # STEP 2: Export ----- trade_m <- read_excel(here("data/trade_m.xlsx")) trade_m <- trade_m %>% pivot_longer( -c(product_name:unit), names_to = "date", values_to = "value", values_drop_na = TRUE ) %>% mutate(date = as.Date(date)) trade_m %>% saveRDS(here('data/trade_m.rds')) ref_trade <- trade_m %>% count(product_name, subproduct_name, variable, unit) ref_trade %>% saveRDS(here('data/ref_trade.rds'))
/oae_index/script/data-prep.R
no_license
piyayut-ch/nabc.dashboard
R
false
false
1,774
r
############################################################################### # STEP 0: SETUP ----- pkgs <- c( 'fs', 'glue', 'here', 'tidyverse', 'janitor', 'DBI', 'vroom', 'readxl', 'writexl', 'lubridate', 'tsibble', 'tsbox', 'timetk', 'xts', 'stringr', 'forcats' ) xfun::pkg_attach2(pkgs, message = FALSE) ############################################################################### # STEP 1: Price ----- con <- dbConnect( RPostgres::Postgres(), host = '192.168.4.133', user = 'postgres', password = 'smxK9T', dbname = 'db_production' ) price_oae_m_region <- dbReadTable(con, 'price_oae_m_region') price_oae_m_TH00 <- price_oae_m_region %>% filter(province_code == "TH00") %>% mutate( date = ymd(glue('{year-543}-{month}-01')), price_name = str_replace(price_name, 'น้ำยางข้น', 'น้ำยางสด') ) %>% arrange(date, commod, subcommod, price_name) price_oae_m_TH00 %>% saveRDS(here('data/price_oae_m_TH00.rds')) price_oae_m_TH00 %>% write_xlsx(here('data/price_oae_m_TH00.xlsx')) ref_price_oae <- price_oae_m_TH00 %>% count(price_name, commod, subcommod, unit) ref_price_oae %>% saveRDS(here('data/ref_price_oae.rds')) ref_price_oae %>% write_xlsx(here('data/ref_price_oae.xlsx')) ############################################################################### # STEP 2: Export ----- trade_m <- read_excel(here("data/trade_m.xlsx")) trade_m <- trade_m %>% pivot_longer( -c(product_name:unit), names_to = "date", values_to = "value", values_drop_na = TRUE ) %>% mutate(date = as.Date(date)) trade_m %>% saveRDS(here('data/trade_m.rds')) ref_trade <- trade_m %>% count(product_name, subproduct_name, variable, unit) ref_trade %>% saveRDS(here('data/ref_trade.rds'))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ShinySimulatorUI.R \docType{data} \name{shiny_simulator_ui} \alias{shiny_simulator_ui} \title{UI file for the Shiny Simulator application} \format{ An object of class \code{shiny.tag.list} (inherits from \code{list}) of length 4. } \usage{ shiny_simulator_ui } \value{ Returns the UI code for the shiny application. } \description{ UI file for the Shiny Simulator application } \keyword{datasets}
/man/shiny_simulator_ui.Rd
no_license
cran/NetSimR
R
false
true
494
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ShinySimulatorUI.R \docType{data} \name{shiny_simulator_ui} \alias{shiny_simulator_ui} \title{UI file for the Shiny Simulator application} \format{ An object of class \code{shiny.tag.list} (inherits from \code{list}) of length 4. } \usage{ shiny_simulator_ui } \value{ Returns the UI code for the shiny application. } \description{ UI file for the Shiny Simulator application } \keyword{datasets}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_taxa.R \name{plot_taxa_sample_time} \alias{plot_taxa_sample_time} \title{Plot dates and times samples were taken (DEPRECATED)} \usage{ plot_taxa_sample_time(observation, id = NA_character_, alpha = 1) } \arguments{ \item{observation}{(tbl_df, tbl, data.frame) The observation table.} \item{id}{(character) Identifier of dataset to be used in plot subtitles.} \item{alpha}{(numeric) Alpha-transparency scale of data points. Useful when many data points overlap. Allowed values are between 0 and 1, where 1 is 100\% opaque. Default is 1.} } \value{ (gg, ggplot) A gg, ggplot object if assigned to a variable, otherwise a plot to your active graphics device } \description{ This function has been deprecated. Use \code{plot_sample_space_time()} instead. }
/man/plot_taxa_sample_time.Rd
permissive
EDIorg/ecocomDP
R
false
true
838
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_taxa.R \name{plot_taxa_sample_time} \alias{plot_taxa_sample_time} \title{Plot dates and times samples were taken (DEPRECATED)} \usage{ plot_taxa_sample_time(observation, id = NA_character_, alpha = 1) } \arguments{ \item{observation}{(tbl_df, tbl, data.frame) The observation table.} \item{id}{(character) Identifier of dataset to be used in plot subtitles.} \item{alpha}{(numeric) Alpha-transparency scale of data points. Useful when many data points overlap. Allowed values are between 0 and 1, where 1 is 100\% opaque. Default is 1.} } \value{ (gg, ggplot) A gg, ggplot object if assigned to a variable, otherwise a plot to your active graphics device } \description{ This function has been deprecated. Use \code{plot_sample_space_time()} instead. }
pilgrimDB = read.csv("PilgrimData.csv") View(pilgrimDB) #Makes scatter plot of Tenure vs Profit in 1999 plot(pilgrimDB$Profit..1999.~ pilgrimDB$Tenure..1999., main = "Scatter plot of Tenure and Profit in 1999", xlab = "Tenure (Years)", ylab = "Profit ($)") #Breaks profit values (1999) into 3 levels pilgrimDB$Profit..1999.Level = "0" pilgrimDB[pilgrimDB$Profit..1999. <= 0,]$Profit..1999.Level = "Level1" pilgrimDB[pilgrimDB$Profit..1999. > 0 & pilgrimDB$Profit..1999. <= 1000,]$Profit..1999.Level = "Level2" pilgrimDB[pilgrimDB$Profit..1999. > 1000,]$Profit..1999.Level = "Level3" table(pilgrimDB$Profit..1999.Level) #Makes pivot table of Age and Profit Levels AgeProfitPivot = table(pilgrimDB$Age..1999., pilgrimDB$Profit..1999.Level) AgeProfitPivot #Makes Pivot table of Onlinepay vs Billpay in 1999 and 2000 OnlinePayBillPay99 = table(pilgrimDB$Online..1999., pilgrimDB$Billpay..1999.) OnlinePayBillPay00 = table(pilgrimDB$Online..2000., pilgrimDB$Billpay..2000.) OnlinePayBillPay99 OnlinePayBillPay00 #Makes Pivot table of Income vs Profit Levels in 1999 IncomeProfitPivot = table(pilgrimDB$Income..1999., pilgrimDB$Profit..1999.Level) IncomeProfitPivot #Makes Pivot table of Income vs Age in 1999 IncomeAgePivot = table(pilgrimDB$Income..1999., pilgrimDB$Age..1999.) IncomeAgePivot
/SeansPart.R
no_license
galahadho/Pilgrim-Case-Study
R
false
false
1,328
r
pilgrimDB = read.csv("PilgrimData.csv") View(pilgrimDB) #Makes scatter plot of Tenure vs Profit in 1999 plot(pilgrimDB$Profit..1999.~ pilgrimDB$Tenure..1999., main = "Scatter plot of Tenure and Profit in 1999", xlab = "Tenure (Years)", ylab = "Profit ($)") #Breaks profit values (1999) into 3 levels pilgrimDB$Profit..1999.Level = "0" pilgrimDB[pilgrimDB$Profit..1999. <= 0,]$Profit..1999.Level = "Level1" pilgrimDB[pilgrimDB$Profit..1999. > 0 & pilgrimDB$Profit..1999. <= 1000,]$Profit..1999.Level = "Level2" pilgrimDB[pilgrimDB$Profit..1999. > 1000,]$Profit..1999.Level = "Level3" table(pilgrimDB$Profit..1999.Level) #Makes pivot table of Age and Profit Levels AgeProfitPivot = table(pilgrimDB$Age..1999., pilgrimDB$Profit..1999.Level) AgeProfitPivot #Makes Pivot table of Onlinepay vs Billpay in 1999 and 2000 OnlinePayBillPay99 = table(pilgrimDB$Online..1999., pilgrimDB$Billpay..1999.) OnlinePayBillPay00 = table(pilgrimDB$Online..2000., pilgrimDB$Billpay..2000.) OnlinePayBillPay99 OnlinePayBillPay00 #Makes Pivot table of Income vs Profit Levels in 1999 IncomeProfitPivot = table(pilgrimDB$Income..1999., pilgrimDB$Profit..1999.Level) IncomeProfitPivot #Makes Pivot table of Income vs Age in 1999 IncomeAgePivot = table(pilgrimDB$Income..1999., pilgrimDB$Age..1999.) IncomeAgePivot
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/selection.R \name{selectInd} \alias{selectInd} \title{Select individuals} \usage{ selectInd( pop, nInd, trait = 1, use = "pheno", sex = "B", selectTop = TRUE, returnPop = TRUE, candidates = NULL, simParam = NULL, ... ) } \arguments{ \item{pop}{and object of \code{\link{Pop-class}} or \code{\link{HybridPop-class}}} \item{nInd}{the number of individuals to select} \item{trait}{the trait for selection. Either a number indicating a single trait or a function returning a vector of length nInd.} \item{use}{select on genetic values "gv", estimated breeding values "ebv", breeding values "bv", phenotypes "pheno", or randomly "rand"} \item{sex}{which sex to select. Use "B" for both, "F" for females and "M" for males. If the simulation is not using sexes, the argument is ignored.} \item{selectTop}{selects highest values if true. Selects lowest values if false.} \item{returnPop}{should results be returned as a \code{\link{Pop-class}}. If FALSE, only the index of selected individuals is returned.} \item{candidates}{an optional vector of eligible selection candidates.} \item{simParam}{an object of \code{\link{SimParam}}} \item{...}{additional arguments if using a function for trait} } \value{ Returns an object of \code{\link{Pop-class}} or \code{\link{HybridPop-class}} } \description{ Selects a subset of nInd individuals from a population. } \examples{ #Create founder haplotypes founderPop = quickHaplo(nInd=10, nChr=1, segSites=10) #Set simulation parameters SP = SimParam$new(founderPop) SP$addTraitA(10) SP$setVarE(h2=0.5) #Create population pop = newPop(founderPop, simParam=SP) #Select best 5 pop2 = selectInd(pop, 5, simParam=SP) }
/fuzzedpackages/AlphaSimR/man/selectInd.Rd
no_license
akhikolla/testpackages
R
false
true
1,843
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/selection.R \name{selectInd} \alias{selectInd} \title{Select individuals} \usage{ selectInd( pop, nInd, trait = 1, use = "pheno", sex = "B", selectTop = TRUE, returnPop = TRUE, candidates = NULL, simParam = NULL, ... ) } \arguments{ \item{pop}{and object of \code{\link{Pop-class}} or \code{\link{HybridPop-class}}} \item{nInd}{the number of individuals to select} \item{trait}{the trait for selection. Either a number indicating a single trait or a function returning a vector of length nInd.} \item{use}{select on genetic values "gv", estimated breeding values "ebv", breeding values "bv", phenotypes "pheno", or randomly "rand"} \item{sex}{which sex to select. Use "B" for both, "F" for females and "M" for males. If the simulation is not using sexes, the argument is ignored.} \item{selectTop}{selects highest values if true. Selects lowest values if false.} \item{returnPop}{should results be returned as a \code{\link{Pop-class}}. If FALSE, only the index of selected individuals is returned.} \item{candidates}{an optional vector of eligible selection candidates.} \item{simParam}{an object of \code{\link{SimParam}}} \item{...}{additional arguments if using a function for trait} } \value{ Returns an object of \code{\link{Pop-class}} or \code{\link{HybridPop-class}} } \description{ Selects a subset of nInd individuals from a population. } \examples{ #Create founder haplotypes founderPop = quickHaplo(nInd=10, nChr=1, segSites=10) #Set simulation parameters SP = SimParam$new(founderPop) SP$addTraitA(10) SP$setVarE(h2=0.5) #Create population pop = newPop(founderPop, simParam=SP) #Select best 5 pop2 = selectInd(pop, 5, simParam=SP) }
makeCacheMatrix <- function(x = matrix()) { m<-NULL set<-function(y){ x<<-y m<<-NULL } get<-function() x setmatrix<-function(solve) m<<- solve getmatrix<-function() m list(set=set, get=get, setmatrix=setmatrix, getmatrix=getmatrix) } cacheSolve <- function(x=matrix(), ...) { m<-x$getmatrix() if(!is.null(m)){ message("getting cached data") return(m) } matrix<-x$get() m<-solve(matrix, ...) x$setmatrix(m) m }
/assignment2.R
no_license
William9208/RProgrammingAssignment2
R
false
false
490
r
makeCacheMatrix <- function(x = matrix()) { m<-NULL set<-function(y){ x<<-y m<<-NULL } get<-function() x setmatrix<-function(solve) m<<- solve getmatrix<-function() m list(set=set, get=get, setmatrix=setmatrix, getmatrix=getmatrix) } cacheSolve <- function(x=matrix(), ...) { m<-x$getmatrix() if(!is.null(m)){ message("getting cached data") return(m) } matrix<-x$get() m<-solve(matrix, ...) x$setmatrix(m) m }
\name{align.MEME} \alias{align.MEME} \title{ Multiple sequence alignment by means of MEME.} \description{ DNA sequences are aligned by means of MEME Version 4.4.0. (Multiple Expectation-Maximization for Motif Elicitation)} \usage{align.MEME(filein, fileout = "Sq.fa", iicc)} \arguments{ \item{filein}{ A set of nucleotide sequences in FASTA format.} \item{fileout}{ Output file in FASTA format} \item{iicc}{ A list of argument input.} } \details{ This function needs aaMI-package. This funtions works with meme<=4.3.0} \value{Output is a file in FASTA format with aligned nucleotide sequences.} \author{ Erola Pairo <epeiroatibec.pcb.ub.es> and Joan Maynou <joan.maynouatupc.edu> } \seealso{align.clustalw, align.MUSCLE}
/man/align.MEME.Rd
no_license
mikepipo/MEET
R
false
false
732
rd
\name{align.MEME} \alias{align.MEME} \title{ Multiple sequence alignment by means of MEME.} \description{ DNA sequences are aligned by means of MEME Version 4.4.0. (Multiple Expectation-Maximization for Motif Elicitation)} \usage{align.MEME(filein, fileout = "Sq.fa", iicc)} \arguments{ \item{filein}{ A set of nucleotide sequences in FASTA format.} \item{fileout}{ Output file in FASTA format} \item{iicc}{ A list of argument input.} } \details{ This function needs aaMI-package. This funtions works with meme<=4.3.0} \value{Output is a file in FASTA format with aligned nucleotide sequences.} \author{ Erola Pairo <epeiroatibec.pcb.ub.es> and Joan Maynou <joan.maynouatupc.edu> } \seealso{align.clustalw, align.MUSCLE}
testlist <- list(data = structure(c(3.21991641333768e-310, 4.94078866277837e+131 ), .Dim = 1:2), q = 0) result <- do.call(biwavelet:::rcpp_row_quantile,testlist) str(result)
/biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610554355-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
173
r
testlist <- list(data = structure(c(3.21991641333768e-310, 4.94078866277837e+131 ), .Dim = 1:2), q = 0) result <- do.call(biwavelet:::rcpp_row_quantile,testlist) str(result)
# load data set power <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings="?") # Convert the Date variable and get susbset of data from the dates 2007-02-01 and 2007-02-02 power$Date <- as.Date(power$Date, "%d/%m/%Y") power_sub <- power[power$Date %in% c(as.Date("1/2/2007","%d/%m/%Y"), as.Date("2/2/2007","%d/%m/%Y")), ] # Create File & Build Plots png(filename="plot4.png", width=480, height=480) #specifies output to PNG and sets width and height par(mfcol=c(2,2), mar=c(4,4,2,2), cex=.8) #create 2 x 2 plot structure #Plot 1 plot(power_sub$Global_active_power, type="l", xlab="", ylab="Global Active Power", axes="False", frame.plot="TRUE") axis(2) axis(1,at=c(0,1440,2900),labels=c("Thur","Fri","Sat")) #Plot 2 plot(power_sub$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering", axes="False", frame.plot="TRUE") lines(power_sub$Sub_metering_2, type="l", col="red") lines(power_sub$Sub_metering_3, type="l", col="blue") legend("topright",col=c("black","red","blue"), lty=1, xjust=1, box.lwd=0,legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3")) axis(2) axis(1,at=c(0,1440,2900),labels=c("Thur","Fri","Sat")) #Plot 3 plot(power_sub$Voltage, type="l", xlab="datetime", ylab="Voltage", axes="False", frame.plot="TRUE") axis(2) axis(1,at=c(0,1440,2900),labels=c("Thur","Fri","Sat")) #Plot 4 plot(power_sub$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power", axes="False", frame.plot="TRUE") axis(2) axis(1,at=c(0,1440,2900),labels=c("Thur","Fri","Sat")) dev.off() #saves PNG file
/plot4.R
no_license
prsalm/ExData_Plotting1
R
false
false
2,177
r
# load data set power <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings="?") # Convert the Date variable and get susbset of data from the dates 2007-02-01 and 2007-02-02 power$Date <- as.Date(power$Date, "%d/%m/%Y") power_sub <- power[power$Date %in% c(as.Date("1/2/2007","%d/%m/%Y"), as.Date("2/2/2007","%d/%m/%Y")), ] # Create File & Build Plots png(filename="plot4.png", width=480, height=480) #specifies output to PNG and sets width and height par(mfcol=c(2,2), mar=c(4,4,2,2), cex=.8) #create 2 x 2 plot structure #Plot 1 plot(power_sub$Global_active_power, type="l", xlab="", ylab="Global Active Power", axes="False", frame.plot="TRUE") axis(2) axis(1,at=c(0,1440,2900),labels=c("Thur","Fri","Sat")) #Plot 2 plot(power_sub$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering", axes="False", frame.plot="TRUE") lines(power_sub$Sub_metering_2, type="l", col="red") lines(power_sub$Sub_metering_3, type="l", col="blue") legend("topright",col=c("black","red","blue"), lty=1, xjust=1, box.lwd=0,legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3")) axis(2) axis(1,at=c(0,1440,2900),labels=c("Thur","Fri","Sat")) #Plot 3 plot(power_sub$Voltage, type="l", xlab="datetime", ylab="Voltage", axes="False", frame.plot="TRUE") axis(2) axis(1,at=c(0,1440,2900),labels=c("Thur","Fri","Sat")) #Plot 4 plot(power_sub$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power", axes="False", frame.plot="TRUE") axis(2) axis(1,at=c(0,1440,2900),labels=c("Thur","Fri","Sat")) dev.off() #saves PNG file
#### 1. Setup (You Try It) #### ## to start with, we will load a package for data management ## and load a package for fitting Linear Mixed effects Models (LMMs) ## recall that loading a package is like opening an app ## and you need to repeat this process each time you start up R ## if everything installed successfully already ## this code should work and return no errors ## if this does not work, try to install it first ## by uncommenting the install packages code # install.packages("data.table", dependencies = TRUE) # install.packages("lme4", dependencies = TRUE) # install.packages("ggplot2", dependencies = TRUE) library(data.table) library(lme4) library(ggplot2) ## load the data (note this is already an R dataset) ## so we use a new function readRDS() ## make sure that the data file is located in ## your project directory d <- readRDS("aces_daily_sim_processed.RDS") ## see the names of the variables in the dataset names(d) #### 2. Calculating Between & Within Effects (Demonstration) #### ## examine the distribution of stress in two participants ## you can see how the means seem to differ but also each participant ## has variation within them in their level of stress ggplot(d[UserID %in% c(1, 2)], aes(STRESS, fill = UserID)) + geom_density() ## calculate the individual mean stress BY participant ID d[, MeanStress := mean(STRESS, na.rm = TRUE), by = UserID] ## calculate the deviation stress scores ## by taking the difference between observed stress scores and the ## indidivudal means ## note that we do not need to specify BY participant ID because the ## individual means are already repeated on each row of the dataset d[, DeviationStress := STRESS - MeanStress] ## now look at a few rows of the data to check what happened d[UserID == 1, .(UserID, STRESS, MeanStress, DeviationStress)] ## look at the mean deviation stress (this should basically be 0) ## representing that it is a within only variable ## note that R may use scientific notation: ## 1.4e4 = 1, then move 
decimal four spots to the right = 14000 ## 1.4e-4 = 1, then move decimal four spots to the left = .00014 mean(d$DeviationStress, na.rm=TRUE) ## now we can estimate LMMs ## first we only use STRESS, which combines both between & within effects summary(lmer(PosAff ~ STRESS + (1 | UserID), data = d)) ## next we use our new mean and deviation stress variables ## to separate the between and within effects of stress summary(lmer(PosAff ~ MeanStress + DeviationStress + (1 | UserID), data = d)) #### 3. Calculating Between & Within Effects (You Try It) #### ## in pairs or small groups, pick one of the other variables ## in the dataset (not STRESS, not PosAff) that is repeatedly measured ## calculate individual means and deviations from the means ## then fit a model predicting positive affect, first from the overall ## score and then from the mean and deviation scores. ## if you need a refresher on what variables are available ## take a look at the table in the slides. ## calculate individual means by ID d[, := mean( , na.rm = TRUE), by = UserID] ## calculate the deviation scores d[, := - ] ## fit a linear mixed model using the original variable to predict positive affect summary(lmer(PosAff ~ + (1 | UserID), data = d)) ## fit a linear mixed model using the mean and deviation ## variables to predict positive affect summary(lmer(PosAff ~ + (1 | UserID), data = d)) #### 4. 
Random Slopes (Demonstration) #### ## random intercept and fixed effects only m1a <- lmer(PosAff ~ MeanStress + DeviationStress + (1 | UserID), data = d) ## random intercept, random slope, and fixed effects m1b <- lmer(PosAff ~ MeanStress + DeviationStress + (1 | UserID) + (0 + DeviationStress | UserID), data = d) ## correlated random intercept and random slope, and fixed effects m1c <- lmer(PosAff ~ MeanStress + DeviationStress + (1 + DeviationStress | UserID), data = d) ## generate summaries of the models and compare ## note the standard errors in particular summary(m1a) summary(m1b) summary(m1c) ## does adding the random slope improve model fit? anova(m1a, m1b, test = "LRT") ## does allow the random intercept and slope to correlate improve model fit? anova(m1b, m1c, test = "LRT") ## overall does the model with correlated random intercept and slope ## fit better than a random intercept only model ## simultaneously tests 2 parameters: slope variance + 1 correlation anova(m1a, m1c, test = "LRT") ## Convergence Issue Example ## Example of a model with convergence & fit issues ## note the "singular fit" and convergence warning summary(lmer(PosAff ~ STRESS + SOLs + NegAff + WASONs + (1 + STRESS + SOLs + NegAff + WASONs | UserID), data = d)) ## this is an example where we might simplify the structure to ## aid convergence and estimation ## note that in the above model with poor convergence ## the SDs for SOLs and WASONs are very small ## we could consider dropping these random slopes ## and just keep as fixed effects summary(lmer(PosAff ~ STRESS + SOLs + NegAff + WASONs + (1 + STRESS + NegAff | UserID), data = d)) #### 5. 
Random Slopes (You Try It) #### ## in pairs or small groups, use the same variable you chose ## earlier to create individual means and deviations from the means ## use these variables to complete the models below and discuss ## their interpretation amongst yourselves ## random intercept and fixed effects only m2a <- lmer(PosAff ~ + + (1 | UserID), data = d) ## random intercept, random slope, and fixed effects m2b <- lmer(PosAff ~ + + (1 | UserID) + (0 + | UserID), data = d) ## correlated random intercept and random slope, and fixed effects m2c <- lmer(PosAff ~ + + (1 + | UserID), data = d) ## generate summaries of the models and compare ## what happens to the standard errors in the fixed ## only vs fixed + random slope models? summary( ) ## Use the anova() function to answer these questions ## for YOUR variable ## does adding the random slope improve model fit? anova( , , test = "LRT") ## does allow the random intercept and slope to correlate improve model fit? ## overall does the model with correlated random intercept and slope ## fit better than a random intercept only model?
/LMM_Pt2_worksheet.R
permissive
tegansellick/MonashHonoursStatistics
R
false
false
6,460
r
#### 1. Setup (You Try It) #### ## to start with, we will load a package for data management ## and load a package for fitting Linear Mixed effects Models (LMMs) ## recall that loading a package is like opening an app ## and you need to repeat this process each time you start up R ## if everything installed successfully already ## this code should work and return no errors ## if this does not work, try to install it first ## by uncommenting the install packages code # install.packages("data.table", dependencies = TRUE) # install.packages("lme4", dependencies = TRUE) # install.packages("ggplot2", dependencies = TRUE) library(data.table) library(lme4) library(ggplot2) ## load the data (note this is already an R dataset) ## so we use a new function readRDS() ## make sure that the data file is located in ## your project directory d <- readRDS("aces_daily_sim_processed.RDS") ## see the names of the variables in the dataset names(d) #### 2. Calculating Between & Within Effects (Demonstration) #### ## examine the distribution of stress in two participants ## you can see how the means seem to differ but also each participant ## has variation within them in their level of stress ggplot(d[UserID %in% c(1, 2)], aes(STRESS, fill = UserID)) + geom_density() ## calculate the individual mean stress BY participant ID d[, MeanStress := mean(STRESS, na.rm = TRUE), by = UserID] ## calculate the deviation stress scores ## by taking the difference between observed stress scores and the ## indidivudal means ## note that we do not need to specify BY participant ID because the ## individual means are already repeated on each row of the dataset d[, DeviationStress := STRESS - MeanStress] ## now look at a few rows of the data to check what happened d[UserID == 1, .(UserID, STRESS, MeanStress, DeviationStress)] ## look at the mean deviation stress (this should basically be 0) ## representing that it is a within only variable ## note that R may use scientific notation: ## 1.4e4 = 1, then move 
decimal four spots to the right = 14000 ## 1.4e-4 = 1, then move decimal four spots to the left = .00014 mean(d$DeviationStress, na.rm=TRUE) ## now we can estimate LMMs ## first we only use STRESS, which combines both between & within effects summary(lmer(PosAff ~ STRESS + (1 | UserID), data = d)) ## next we use our new mean and deviation stress variables ## to separate the between and within effects of stress summary(lmer(PosAff ~ MeanStress + DeviationStress + (1 | UserID), data = d)) #### 3. Calculating Between & Within Effects (You Try It) #### ## in pairs or small groups, pick one of the other variables ## in the dataset (not STRESS, not PosAff) that is repeatedly measured ## calculate individual means and deviations from the means ## then fit a model predicting positive affect, first from the overall ## score and then from the mean and deviation scores. ## if you need a refresher on what variables are available ## take a look at the table in the slides. ## calculate individual means by ID d[, := mean( , na.rm = TRUE), by = UserID] ## calculate the deviation scores d[, := - ] ## fit a linear mixed model using the original variable to predict positive affect summary(lmer(PosAff ~ + (1 | UserID), data = d)) ## fit a linear mixed model using the mean and deviation ## variables to predict positive affect summary(lmer(PosAff ~ + (1 | UserID), data = d)) #### 4. 
Random Slopes (Demonstration) #### ## random intercept and fixed effects only m1a <- lmer(PosAff ~ MeanStress + DeviationStress + (1 | UserID), data = d) ## random intercept, random slope, and fixed effects m1b <- lmer(PosAff ~ MeanStress + DeviationStress + (1 | UserID) + (0 + DeviationStress | UserID), data = d) ## correlated random intercept and random slope, and fixed effects m1c <- lmer(PosAff ~ MeanStress + DeviationStress + (1 + DeviationStress | UserID), data = d) ## generate summaries of the models and compare ## note the standard errors in particular summary(m1a) summary(m1b) summary(m1c) ## does adding the random slope improve model fit? anova(m1a, m1b, test = "LRT") ## does allow the random intercept and slope to correlate improve model fit? anova(m1b, m1c, test = "LRT") ## overall does the model with correlated random intercept and slope ## fit better than a random intercept only model ## simultaneously tests 2 parameters: slope variance + 1 correlation anova(m1a, m1c, test = "LRT") ## Convergence Issue Example ## Example of a model with convergence & fit issues ## note the "singular fit" and convergence warning summary(lmer(PosAff ~ STRESS + SOLs + NegAff + WASONs + (1 + STRESS + SOLs + NegAff + WASONs | UserID), data = d)) ## this is an example where we might simplify the structure to ## aid convergence and estimation ## note that in the above model with poor convergence ## the SDs for SOLs and WASONs are very small ## we could consider dropping these random slopes ## and just keep as fixed effects summary(lmer(PosAff ~ STRESS + SOLs + NegAff + WASONs + (1 + STRESS + NegAff | UserID), data = d)) #### 5. 
Random Slopes (You Try It) #### ## in pairs or small groups, use the same variable you chose ## earlier to create individual means and deviations from the means ## use these variables to complete the models below and discuss ## their interpretation amongst yourselves ## random intercept and fixed effects only m2a <- lmer(PosAff ~ + + (1 | UserID), data = d) ## random intercept, random slope, and fixed effects m2b <- lmer(PosAff ~ + + (1 | UserID) + (0 + | UserID), data = d) ## correlated random intercept and random slope, and fixed effects m2c <- lmer(PosAff ~ + + (1 + | UserID), data = d) ## generate summaries of the models and compare ## what happens to the standard errors in the fixed ## only vs fixed + random slope models? summary( ) ## Use the anova() function to answer these questions ## for YOUR variable ## does adding the random slope improve model fit? anova( , , test = "LRT") ## does allow the random intercept and slope to correlate improve model fit? ## overall does the model with correlated random intercept and slope ## fit better than a random intercept only model?
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/MS1_AdductDbCreate.R \name{prepareCompoundList} \alias{prepareCompoundList} \title{Preparation of compound list} \usage{ prepareCompoundList(compoundList, adductList = NA, rt = FALSE, ccs = FALSE, extId = FALSE) } \arguments{ \item{compoundList}{List of compounds that shall be added to DB} \item{adductList}{Vector with adducts that shall be covered in the DB.} \item{rt}{Boolean value indicating if compound list contains RT data} \item{ccs}{Boolean value indicating if compound list contains CCS data} } \value{ Returns a data frame suitable for upload to a SQLite DB. } \description{ A compound list that can be used with masstrixR can be generated on the fly. Minimum input is a data frame with the following columns: metabolite id ($id), SMILES ($smiles), InChI ($inchi), InChIKey ($inchikey), formula ($formula) metabolite name ($name) and an exact mass ($exactmass). Furthermore, the adducts that shall be covered in the DB have to be defined. This can be either done by using an list of adducts or supplying a adduct definition for each metabolite with an additiona adduct column ($adducts). If it is intended to perform retetion time and collisional cross section matching columns containing this information are required ($rt and $ccs). In case of CCS matching, individual adduct rules are required since each adduct has a different CCS value. Examples for each input are found in the examples in the vignettes. } \examples{ prepareCompoundList(compoundList, adducts = c("M+H", "M+Na")) } \seealso{ \code{\link{validateCompoundList}} \code{\link{createDb}} } \author{ Michael Witting, \email{michael.witting@helmholtz-muenchen.de} }
/man/prepareCompoundList.Rd
no_license
AspirinCode/masstrixR
R
false
true
1,729
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/MS1_AdductDbCreate.R \name{prepareCompoundList} \alias{prepareCompoundList} \title{Preparation of compound list} \usage{ prepareCompoundList(compoundList, adductList = NA, rt = FALSE, ccs = FALSE, extId = FALSE) } \arguments{ \item{compoundList}{List of compounds that shall be added to DB} \item{adductList}{Vector with adducts that shall be covered in the DB.} \item{rt}{Boolean value indicating if compound list contains RT data} \item{ccs}{Boolean value indicating if compound list contains CCS data} } \value{ Returns a data frame suitable for upload to a SQLite DB. } \description{ A compound list that can be used with masstrixR can be generated on the fly. Minimum input is a data frame with the following columns: metabolite id ($id), SMILES ($smiles), InChI ($inchi), InChIKey ($inchikey), formula ($formula) metabolite name ($name) and an exact mass ($exactmass). Furthermore, the adducts that shall be covered in the DB have to be defined. This can be either done by using an list of adducts or supplying a adduct definition for each metabolite with an additiona adduct column ($adducts). If it is intended to perform retetion time and collisional cross section matching columns containing this information are required ($rt and $ccs). In case of CCS matching, individual adduct rules are required since each adduct has a different CCS value. Examples for each input are found in the examples in the vignettes. } \examples{ prepareCompoundList(compoundList, adducts = c("M+H", "M+Na")) } \seealso{ \code{\link{validateCompoundList}} \code{\link{createDb}} } \author{ Michael Witting, \email{michael.witting@helmholtz-muenchen.de} }
#"~/Nakano_RNAseq/network_analysis/script/tGRN_analysis20181023/motif/tGRN_MCLNumDEGs_multifasta.R" ########################################### #upstream_500 <- read.table(file = "~/bigdata/yasue/motif/TAIR10_upstream_500_20101028", fill = T, sep = ",", stringsAsFactors = F) #upstream_500 <- as.character(unlist(upstream_500)) temp <- grep("chr", upstream_500) AGI <- c() i <- 1 total <- length(temp) for(i in i:total){ AGI <- c(AGI, substr(upstream_500[temp[i]], 2, 10)) print(i) i <- i+1 } #最後の一周だけ自動化できなかった。 allsequence <- list() presequence <- c() sequence <- c() i <- 1 total <- length(temp) for(i in i:c(total-1)){ presequence <- upstream_500[c(temp[i]+1):c(temp[i+1]-1)] n <- 1 for(n in n:length(presequence)){ sequence <- paste0(sequence, presequence[n]) n <- n+1 } allsequence <- c(allsequence, list(sequence)) sequence <- c() print(i) i <- i+1 } #残りの最後の一周を追加 presequence <- upstream_500[c(temp[total]+1):length(upstream_500)] sequence <- paste0(sequence, presequence[n]) allsequence <- c(allsequence, list(sequence)) #data.frame up_500bp <- data.frame(AGI = AGI, sequence = unlist(allsequence), stringsAsFactors = F ) #####################対象の遺伝子群を引っ張ってくる####################### load("~/bigdata/yasue/tGRN_Groping/inflation4/MasterTable_inflation4.RData") t <- proc.time() library("stringr") temp <- MasterTable[!is.na(MasterTable$MCLNum), ] T_AGI <- c() T_data <- c() T_control <- c() T_control_data <- c() total_i <- length(unique(temp$MCLNum[!is.na(temp$MCLNum)])) i <- 1 for(i in i:total_i){ T_AGI <- filter(temp, MCLNum == i)[, "AGI"] T_data <- up_500bp[match(T_AGI, up_500bp$AGI), ] T_control <- sample(up_500bp$AGI, length(T_AGI)) T_control_data <- up_500bp[match(T_control, up_500bp$AGI), ] fasta <- c() allfasta <- c() cont_fasta <- c() cont_allfasta <- c() total_o <- nrow(T_data) o <- 1 for(o in o:total_o){ fasta <- rbind(str_sub(T_data[, "sequence"][o], start=1, end=80), str_sub(T_data[, "sequence"][o], start=81, end=160), str_sub(T_data[, "sequence"][o], start=161, 
end=240), str_sub(T_data[, "sequence"][o], start=241, end=320), str_sub(T_data[, "sequence"][o], start=321, end=400), str_sub(T_data[, "sequence"][o], start=401, end=480), str_sub(T_data[, "sequence"][o], start=481, end=500) ) cont_fasta <- rbind(str_sub(T_control_data[, "sequence"][o], start=1, end=80), str_sub(T_control_data[, "sequence"][o], start=81, end=160), str_sub(T_control_data[, "sequence"][o], start=161, end=240), str_sub(T_control_data[, "sequence"][o], start=241, end=320), str_sub(T_control_data[, "sequence"][o], start=321, end=400), str_sub(T_control_data[, "sequence"][o], start=401, end=480), str_sub(T_control_data[, "sequence"][o], start=481, end=500) ) data_fastaAGI <- paste0(">", T_data[, "AGI"][o]) control_fastaAGI <- paste0(">", T_control_data[, "AGI"][o]) allfasta <- c(allfasta, rbind(data_fastaAGI, fasta)) cont_allfasta <- c(cont_allfasta, rbind(control_fastaAGI, cont_fasta)) o <- o+1 } target <- paste0("~/bigdata/yasue/tGRN_Groping/inflation4/motif/multi-fasta/target/", "tGRN_MCLNum", i, "_upstream500.fasta") control <- paste0("~/bigdata/yasue/tGRN_Groping/inflation4/motif/multi-fasta/control/", "control_tGRN_MCLNum", i, "_upstream500.fasta") write.table(allfasta, file = target, append = F, quote = F, sep = "\t", row.names = F, col.names = F) write.table(cont_allfasta, file = control, append = F, quote = F, sep = "\t", row.names = F, col.names = F) print(i) i <- i+1 } t1 <- proc.time() - t print(t1)
/tGRN_MCLNumDEGs_multifasta.R
no_license
YKeito/tGRN_analysis20181023
R
false
false
3,938
r
#"~/Nakano_RNAseq/network_analysis/script/tGRN_analysis20181023/motif/tGRN_MCLNumDEGs_multifasta.R" ########################################### #upstream_500 <- read.table(file = "~/bigdata/yasue/motif/TAIR10_upstream_500_20101028", fill = T, sep = ",", stringsAsFactors = F) #upstream_500 <- as.character(unlist(upstream_500)) temp <- grep("chr", upstream_500) AGI <- c() i <- 1 total <- length(temp) for(i in i:total){ AGI <- c(AGI, substr(upstream_500[temp[i]], 2, 10)) print(i) i <- i+1 } #最後の一周だけ自動化できなかった。 allsequence <- list() presequence <- c() sequence <- c() i <- 1 total <- length(temp) for(i in i:c(total-1)){ presequence <- upstream_500[c(temp[i]+1):c(temp[i+1]-1)] n <- 1 for(n in n:length(presequence)){ sequence <- paste0(sequence, presequence[n]) n <- n+1 } allsequence <- c(allsequence, list(sequence)) sequence <- c() print(i) i <- i+1 } #残りの最後の一周を追加 presequence <- upstream_500[c(temp[total]+1):length(upstream_500)] sequence <- paste0(sequence, presequence[n]) allsequence <- c(allsequence, list(sequence)) #data.frame up_500bp <- data.frame(AGI = AGI, sequence = unlist(allsequence), stringsAsFactors = F ) #####################対象の遺伝子群を引っ張ってくる####################### load("~/bigdata/yasue/tGRN_Groping/inflation4/MasterTable_inflation4.RData") t <- proc.time() library("stringr") temp <- MasterTable[!is.na(MasterTable$MCLNum), ] T_AGI <- c() T_data <- c() T_control <- c() T_control_data <- c() total_i <- length(unique(temp$MCLNum[!is.na(temp$MCLNum)])) i <- 1 for(i in i:total_i){ T_AGI <- filter(temp, MCLNum == i)[, "AGI"] T_data <- up_500bp[match(T_AGI, up_500bp$AGI), ] T_control <- sample(up_500bp$AGI, length(T_AGI)) T_control_data <- up_500bp[match(T_control, up_500bp$AGI), ] fasta <- c() allfasta <- c() cont_fasta <- c() cont_allfasta <- c() total_o <- nrow(T_data) o <- 1 for(o in o:total_o){ fasta <- rbind(str_sub(T_data[, "sequence"][o], start=1, end=80), str_sub(T_data[, "sequence"][o], start=81, end=160), str_sub(T_data[, "sequence"][o], start=161, 
end=240), str_sub(T_data[, "sequence"][o], start=241, end=320), str_sub(T_data[, "sequence"][o], start=321, end=400), str_sub(T_data[, "sequence"][o], start=401, end=480), str_sub(T_data[, "sequence"][o], start=481, end=500) ) cont_fasta <- rbind(str_sub(T_control_data[, "sequence"][o], start=1, end=80), str_sub(T_control_data[, "sequence"][o], start=81, end=160), str_sub(T_control_data[, "sequence"][o], start=161, end=240), str_sub(T_control_data[, "sequence"][o], start=241, end=320), str_sub(T_control_data[, "sequence"][o], start=321, end=400), str_sub(T_control_data[, "sequence"][o], start=401, end=480), str_sub(T_control_data[, "sequence"][o], start=481, end=500) ) data_fastaAGI <- paste0(">", T_data[, "AGI"][o]) control_fastaAGI <- paste0(">", T_control_data[, "AGI"][o]) allfasta <- c(allfasta, rbind(data_fastaAGI, fasta)) cont_allfasta <- c(cont_allfasta, rbind(control_fastaAGI, cont_fasta)) o <- o+1 } target <- paste0("~/bigdata/yasue/tGRN_Groping/inflation4/motif/multi-fasta/target/", "tGRN_MCLNum", i, "_upstream500.fasta") control <- paste0("~/bigdata/yasue/tGRN_Groping/inflation4/motif/multi-fasta/control/", "control_tGRN_MCLNum", i, "_upstream500.fasta") write.table(allfasta, file = target, append = F, quote = F, sep = "\t", row.names = F, col.names = F) write.table(cont_allfasta, file = control, append = F, quote = F, sep = "\t", row.names = F, col.names = F) print(i) i <- i+1 } t1 <- proc.time() - t print(t1)
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/arqasgui.R \name{exportToUI} \alias{exportToUI} \title{Exports a function to the UI} \usage{ exportToUI(fun, name, args, class) } \arguments{ \item{fun}{Function of the model} \item{name}{Name of the model} \item{args}{Type of each parameter of the function (numerical, character, vector, matrix)} \item{class}{A string to agrupate funtions in the same menu option} } \description{ Exports a function to the UI }
/man/exportToUI.Rd
no_license
vishkey/arqasgui
R
false
false
503
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/arqasgui.R \name{exportToUI} \alias{exportToUI} \title{Exports a function to the UI} \usage{ exportToUI(fun, name, args, class) } \arguments{ \item{fun}{Function of the model} \item{name}{Name of the model} \item{args}{Type of each parameter of the function (numerical, character, vector, matrix)} \item{class}{A string to agrupate funtions in the same menu option} } \description{ Exports a function to the UI }
# Chapter 8 Lab: Decision Trees # Fitting Classification Trees library(tree) library(ISLR) attach(Carseats) High=ifelse(Sales<=8,"No","Yes") Carseats=data.frame(Carseats,High) head(Caravan) tree.carseats=tree(High~.-Sales,Carseats) summary(tree.carseats) plot(tree.carseats) text(tree.carseats,pretty=0) tree.carseats set.seed(2) train=sample(1:nrow(Carseats), 200) Carseats.test=Carseats[-train,] High.test=High[-train] tree.carseats=tree(High~.-Sales,Carseats,subset=train) tree.pred=predict(tree.carseats,Carseats.test,type="class") table(tree.pred,High.test) (86+57)/200 set.seed(3) cv.carseats=cv.tree(tree.carseats,FUN=prune.misclass) names(cv.carseats) cv.carseats par(mfrow=c(1,2)) plot(cv.carseats$size,cv.carseats$dev,type="b") plot(cv.carseats$k,cv.carseats$dev,type="b") prune.carseats=prune.misclass(tree.carseats,best=9) plot(prune.carseats) text(prune.carseats,pretty=0) tree.pred=predict(prune.carseats,Carseats.test,type="class") table(tree.pred,High.test) (94+60)/200 prune.carseats=prune.misclass(tree.carseats,best=15) plot(prune.carseats) text(prune.carseats,pretty=0) tree.pred=predict(prune.carseats,Carseats.test,type="class") table(tree.pred,High.test) (86+62)/200 # Fitting Regression Trees library(MASS) set.seed(1) train = sample(1:nrow(Boston), nrow(Boston)/2) tree.boston=tree(medv~.,Boston,subset=train) summary(tree.boston) plot(tree.boston) text(tree.boston,pretty=0) cv.boston=cv.tree(tree.boston) plot(cv.boston$size,cv.boston$dev,type='b') prune.boston=prune.tree(tree.boston,best=5) plot(prune.boston) text(prune.boston,pretty=0) yhat=predict(tree.boston,newdata=Boston[-train,]) boston.test=Boston[-train,"medv"] plot(yhat,boston.test) abline(0,1) mean((yhat-boston.test)^2) # Bagging and Random Forests library(randomForest) set.seed(1) bag.boston=randomForest(medv~.,data=Boston,subset=train,mtry=13,importance=TRUE) bag.boston yhat.bag = predict(bag.boston,newdata=Boston[-train,]) plot(yhat.bag, boston.test) abline(0,1) mean((yhat.bag-boston.test)^2) 
bag.boston=randomForest(medv~.,data=Boston,subset=train,mtry=13,ntree=25) yhat.bag = predict(bag.boston,newdata=Boston[-train,]) mean((yhat.bag-boston.test)^2) set.seed(1) rf.boston=randomForest(medv~.,data=Boston,subset=train,mtry=6,importance=TRUE) yhat.rf = predict(rf.boston,newdata=Boston[-train,]) mean((yhat.rf-boston.test)^2) importance(rf.boston) varImpPlot(rf.boston) # Boosting library(gbm) set.seed(1) boost.boston=gbm(medv~.,data=Boston[train,],distribution="gaussian",n.trees=5000,interaction.depth=4) summary(boost.boston) par(mfrow=c(1,2)) plot(boost.boston,i="rm") plot(boost.boston,i="lstat") yhat.boost=predict(boost.boston,newdata=Boston[-train,],n.trees=5000) mean((yhat.boost-boston.test)^2) boost.boston=gbm(medv~.,data=Boston[train,],distribution="gaussian",n.trees=5000,interaction.depth=4,shrinkage=0.2,verbose=F) yhat.boost=predict(boost.boston,newdata=Boston[-train,],n.trees=5000) mean((yhat.boost-boston.test)^2)
/ch8.r
no_license
codingfinance/ISLR
R
false
false
2,944
r
# Chapter 8 Lab: Decision Trees # Fitting Classification Trees library(tree) library(ISLR) attach(Carseats) High=ifelse(Sales<=8,"No","Yes") Carseats=data.frame(Carseats,High) head(Caravan) tree.carseats=tree(High~.-Sales,Carseats) summary(tree.carseats) plot(tree.carseats) text(tree.carseats,pretty=0) tree.carseats set.seed(2) train=sample(1:nrow(Carseats), 200) Carseats.test=Carseats[-train,] High.test=High[-train] tree.carseats=tree(High~.-Sales,Carseats,subset=train) tree.pred=predict(tree.carseats,Carseats.test,type="class") table(tree.pred,High.test) (86+57)/200 set.seed(3) cv.carseats=cv.tree(tree.carseats,FUN=prune.misclass) names(cv.carseats) cv.carseats par(mfrow=c(1,2)) plot(cv.carseats$size,cv.carseats$dev,type="b") plot(cv.carseats$k,cv.carseats$dev,type="b") prune.carseats=prune.misclass(tree.carseats,best=9) plot(prune.carseats) text(prune.carseats,pretty=0) tree.pred=predict(prune.carseats,Carseats.test,type="class") table(tree.pred,High.test) (94+60)/200 prune.carseats=prune.misclass(tree.carseats,best=15) plot(prune.carseats) text(prune.carseats,pretty=0) tree.pred=predict(prune.carseats,Carseats.test,type="class") table(tree.pred,High.test) (86+62)/200 # Fitting Regression Trees library(MASS) set.seed(1) train = sample(1:nrow(Boston), nrow(Boston)/2) tree.boston=tree(medv~.,Boston,subset=train) summary(tree.boston) plot(tree.boston) text(tree.boston,pretty=0) cv.boston=cv.tree(tree.boston) plot(cv.boston$size,cv.boston$dev,type='b') prune.boston=prune.tree(tree.boston,best=5) plot(prune.boston) text(prune.boston,pretty=0) yhat=predict(tree.boston,newdata=Boston[-train,]) boston.test=Boston[-train,"medv"] plot(yhat,boston.test) abline(0,1) mean((yhat-boston.test)^2) # Bagging and Random Forests library(randomForest) set.seed(1) bag.boston=randomForest(medv~.,data=Boston,subset=train,mtry=13,importance=TRUE) bag.boston yhat.bag = predict(bag.boston,newdata=Boston[-train,]) plot(yhat.bag, boston.test) abline(0,1) mean((yhat.bag-boston.test)^2) 
bag.boston=randomForest(medv~.,data=Boston,subset=train,mtry=13,ntree=25) yhat.bag = predict(bag.boston,newdata=Boston[-train,]) mean((yhat.bag-boston.test)^2) set.seed(1) rf.boston=randomForest(medv~.,data=Boston,subset=train,mtry=6,importance=TRUE) yhat.rf = predict(rf.boston,newdata=Boston[-train,]) mean((yhat.rf-boston.test)^2) importance(rf.boston) varImpPlot(rf.boston) # Boosting library(gbm) set.seed(1) boost.boston=gbm(medv~.,data=Boston[train,],distribution="gaussian",n.trees=5000,interaction.depth=4) summary(boost.boston) par(mfrow=c(1,2)) plot(boost.boston,i="rm") plot(boost.boston,i="lstat") yhat.boost=predict(boost.boston,newdata=Boston[-train,],n.trees=5000) mean((yhat.boost-boston.test)^2) boost.boston=gbm(medv~.,data=Boston[train,],distribution="gaussian",n.trees=5000,interaction.depth=4,shrinkage=0.2,verbose=F) yhat.boost=predict(boost.boston,newdata=Boston[-train,],n.trees=5000) mean((yhat.boost-boston.test)^2)
#' Example data set #' #' "pbmc_small" #' Example data set 2 #' #' "cell_type_df" #' Example data set 2 #' #' "pbmc_small_nested_interactions"
/R/data.R
no_license
idelvalle/tidyseurat
R
false
false
144
r
#' Example data set #' #' "pbmc_small" #' Example data set 2 #' #' "cell_type_df" #' Example data set 2 #' #' "pbmc_small_nested_interactions"
### This function will do the same as above, but uses the Double Mad Cap #' @title FUNCTION_TITLE #' @description FUNCTION_DESCRIPTION #' @param data PARAM_DESCRIPTION #' @param chaos_pc PARAM_DESCRIPTION, Default: 10 #' @param iso_portion PARAM_DESCRIPTION, Default: 1 #' @param metadata PARAM_DESCRIPTION, Default: ensemble_metadata #' @param region_portion PARAM_DESCRIPTION, Default: 0 #' @param super_region_portion PARAM_DESCRIPTION, Default: 0 #' @param global_portion PARAM_DESCRIPTION, Default: 0 #' @param N_draws PARAM_DESCRIPTION, Default: 1000 #' @return OUTPUT_DESCRIPTION #' @details DETAILS #' @examples #' \dontrun{ #' if(interactive()){ #' #EXAMPLE1 #' } #' } #' @rdname chaos_draw_distro_DMC #' @export chaos_draw_distro_DMC <- function(data, chaos_pc = 10, iso_portion = 1, metadata = ensemble_metadata, region_portion = 0, super_region_portion = 0, global_portion = 0, N_draws = 1000) { # data = copy(data_models_passed) # chaos_pc <- 10 # iso_portion = 2/3; region_portion = 1/3; super_region_portion = 0; global_portion = 0 ## First, stop if the portion sums are greater than 1 if (region_portion + iso_portion + super_region_portion + global_portion > 1) { stop("RMSE fractions are > 1.") } print("Computing the weighted RMSE by country-region-SR-global portions") data[, rmse_WREG := (region_portion * rmse_REG) + (iso_portion * rmse_iso) + (super_region_portion * rmse_SRREG) + (global_portion * rmse_global)] print("Rank each submodel by iso3/region/sr specific by iso3 and OOS year") data <- data[!is.na(rmse_WREG), iso_rank := frank(rmse_WREG, ties.method = "first"), by = c("iso3", "oos")] data <- data[order(iso3, oos, iso_rank)] ## Drop if rmse_iso is missing data <- data[!is.na(rmse_WREG)] ## What's the highest rank for each? 
We want the chaos% (and respective draws for each) #### NOTE: We use the length as opposed to the max, because that will allow us to filter by criteria ##### and therefore, we can get a higher number of draws to use by filtering out ## We also wanna drop if RMSE is NA (because those models weren't picked up through virtue of missing vals) ## Find the cutoff number of models ## What's the max number of models per country we got data[, max_rank := max(iso_rank, na.rm = T), c("iso3")] ## What's the Chaos% cutoff for each country? print(paste0("Keep only the top ", chaos_pc, "% models")) data[, draw_cutoff := ceiling(chaos_pc / 100 * max_rank)] print(paste0("Countries where draw_cutoff is <=", chaos_pc, ", we bump it up to the minimum of ", chaos_pc, "or the max_rank")) data[draw_cutoff <= chaos_pc, draw_cutoff := min(chaos_pc, max_rank, na.rm = T), by = c("iso3")] ## Cut off the models passed beyond the cutoff value data <- data[iso_rank <= draw_cutoff] ### What's the number of draws per country-model ? data[, draw_num := ceiling(N_draws / draw_cutoff)] #### ORDER FEs in the order of ensemble grid !!!! 
#### if (!is.null(metadata$xvar)[1]) { order_of_FEs <- metadata$xvar ## Get no FE col names data_cols_no_FE <- setdiff(colnames(data), order_of_FEs) ## Get FE cols in data and drop the ones where the FE did not pass whatsoever FE_data_cols <- colnames(data)[grep("^FE_", colnames(data))] ## Drop from order_of_FEs (use intersect of large with small) order_of_FEs <- intersect(order_of_FEs, FE_data_cols) ## Reorder data setcolorder(data, neworder = c(order_of_FEs, data_cols_no_FE)) } print("Making draws:") ## Let's get the unique models (with the max number of draws) remaining we will create draws over models_remaining <- unique(data[, .(draw_num, model_number, yvar)]) models_remaining[, drawz := max(draw_num), "model_number"] models_remaining <- models_remaining[drawz == draw_num] models_remaining[, draw_num := NULL] ## Create task ID (1:n) and number of draws array_grid <- copy(models_remaining) array_grid <- array_grid[, id := .I] print("Compute the MAD cap from ALL the MADs if we want to cap our residual errors") data[, diff_mad_cap := (qnorm(0.90) / .6745) * mad(diff_mad) + median(diff_mad)] data[ diff_mad > diff_mad_cap, diff_mad := diff_mad_cap] data[, level_mad_cap := (qnorm(0.90) / .6745) * mad(level_mad) + median(level_mad)] data[ level_mad > level_mad_cap, level_mad := level_mad_cap] ## Extract the MAD cap scalars diff_mad_cap <- data[1, diff_mad_cap] level_mad_cap <- data[1, level_mad_cap] return(list( array_grid = array_grid, data = data, level_mad_cap = level_mad_cap, diff_mad_cap = diff_mad_cap, N_draws = N_draws )) }
/AFModel/R/chaos_draw_distro_DMC.R
permissive
sadatnfs/AFModel
R
false
false
4,692
r
### This function will do the same as above, but uses the Double Mad Cap #' @title FUNCTION_TITLE #' @description FUNCTION_DESCRIPTION #' @param data PARAM_DESCRIPTION #' @param chaos_pc PARAM_DESCRIPTION, Default: 10 #' @param iso_portion PARAM_DESCRIPTION, Default: 1 #' @param metadata PARAM_DESCRIPTION, Default: ensemble_metadata #' @param region_portion PARAM_DESCRIPTION, Default: 0 #' @param super_region_portion PARAM_DESCRIPTION, Default: 0 #' @param global_portion PARAM_DESCRIPTION, Default: 0 #' @param N_draws PARAM_DESCRIPTION, Default: 1000 #' @return OUTPUT_DESCRIPTION #' @details DETAILS #' @examples #' \dontrun{ #' if(interactive()){ #' #EXAMPLE1 #' } #' } #' @rdname chaos_draw_distro_DMC #' @export chaos_draw_distro_DMC <- function(data, chaos_pc = 10, iso_portion = 1, metadata = ensemble_metadata, region_portion = 0, super_region_portion = 0, global_portion = 0, N_draws = 1000) { # data = copy(data_models_passed) # chaos_pc <- 10 # iso_portion = 2/3; region_portion = 1/3; super_region_portion = 0; global_portion = 0 ## First, stop if the portion sums are greater than 1 if (region_portion + iso_portion + super_region_portion + global_portion > 1) { stop("RMSE fractions are > 1.") } print("Computing the weighted RMSE by country-region-SR-global portions") data[, rmse_WREG := (region_portion * rmse_REG) + (iso_portion * rmse_iso) + (super_region_portion * rmse_SRREG) + (global_portion * rmse_global)] print("Rank each submodel by iso3/region/sr specific by iso3 and OOS year") data <- data[!is.na(rmse_WREG), iso_rank := frank(rmse_WREG, ties.method = "first"), by = c("iso3", "oos")] data <- data[order(iso3, oos, iso_rank)] ## Drop if rmse_iso is missing data <- data[!is.na(rmse_WREG)] ## What's the highest rank for each? 
We want the chaos% (and respective draws for each) #### NOTE: We use the length as opposed to the max, because that will allow us to filter by criteria ##### and therefore, we can get a higher number of draws to use by filtering out ## We also wanna drop if RMSE is NA (because those models weren't picked up through virtue of missing vals) ## Find the cutoff number of models ## What's the max number of models per country we got data[, max_rank := max(iso_rank, na.rm = T), c("iso3")] ## What's the Chaos% cutoff for each country? print(paste0("Keep only the top ", chaos_pc, "% models")) data[, draw_cutoff := ceiling(chaos_pc / 100 * max_rank)] print(paste0("Countries where draw_cutoff is <=", chaos_pc, ", we bump it up to the minimum of ", chaos_pc, "or the max_rank")) data[draw_cutoff <= chaos_pc, draw_cutoff := min(chaos_pc, max_rank, na.rm = T), by = c("iso3")] ## Cut off the models passed beyond the cutoff value data <- data[iso_rank <= draw_cutoff] ### What's the number of draws per country-model ? data[, draw_num := ceiling(N_draws / draw_cutoff)] #### ORDER FEs in the order of ensemble grid !!!! 
#### if (!is.null(metadata$xvar)[1]) { order_of_FEs <- metadata$xvar ## Get no FE col names data_cols_no_FE <- setdiff(colnames(data), order_of_FEs) ## Get FE cols in data and drop the ones where the FE did not pass whatsoever FE_data_cols <- colnames(data)[grep("^FE_", colnames(data))] ## Drop from order_of_FEs (use intersect of large with small) order_of_FEs <- intersect(order_of_FEs, FE_data_cols) ## Reorder data setcolorder(data, neworder = c(order_of_FEs, data_cols_no_FE)) } print("Making draws:") ## Let's get the unique models (with the max number of draws) remaining we will create draws over models_remaining <- unique(data[, .(draw_num, model_number, yvar)]) models_remaining[, drawz := max(draw_num), "model_number"] models_remaining <- models_remaining[drawz == draw_num] models_remaining[, draw_num := NULL] ## Create task ID (1:n) and number of draws array_grid <- copy(models_remaining) array_grid <- array_grid[, id := .I] print("Compute the MAD cap from ALL the MADs if we want to cap our residual errors") data[, diff_mad_cap := (qnorm(0.90) / .6745) * mad(diff_mad) + median(diff_mad)] data[ diff_mad > diff_mad_cap, diff_mad := diff_mad_cap] data[, level_mad_cap := (qnorm(0.90) / .6745) * mad(level_mad) + median(level_mad)] data[ level_mad > level_mad_cap, level_mad := level_mad_cap] ## Extract the MAD cap scalars diff_mad_cap <- data[1, diff_mad_cap] level_mad_cap <- data[1, level_mad_cap] return(list( array_grid = array_grid, data = data, level_mad_cap = level_mad_cap, diff_mad_cap = diff_mad_cap, N_draws = N_draws )) }
tweets <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-06-15/tweets.csv') library(tidyverse) library(tidytext) library(textdata) library(wordcloud) library(wordcloud2) library(dplyr) library(patchwork) library(ggimage) library(cowplot) most_traction <- tweets %>% as_tibble() %>% unnest_tokens(word, content) #remove words that aren't useful like "to" and "of" most_traction <- most_traction %>% anti_join(stop_words) #most common words in titles test <- most_traction %>% count(word, sort = TRUE) test %>% filter(n>500) %>% ggplot(aes(n,word)) +geom_col() #get joy lexicon nrc_joy <- get_sentiments("nrc") %>% filter(sentiment == "joy") #positive words joy_words <- most_traction %>%inner_join(nrc_joy) %>% count(word, sort = TRUE) nrc_disgust <- get_sentiments("nrc") %>% filter(sentiment == "disgust") #disgust words disgust_words <- most_traction %>%inner_join(nrc_disgust) %>% count(word, sort = TRUE) wordcloud2(joy_words,shape ="circle", color = "goldenrod",size=0.5,backgroundColor = "bisque") wordcloud2(disgust_words,shape ="circle", color = "tan",size=0.5,backgroundColor = "bisque") ggplot()+draw_image( image = "/Users/thivina/Documents/TidyTuesday-06-15/img/joy.png", x=0,y=0,width = 1, height=0.6) +draw_image( image = "/Users/thivina/Documents/TidyTuesday-06-15/img/disgust.png", x=0,y=0.5,width = 1, height=0.6)
/tweets_tt.R
no_license
vina1998/TidyTuesday-06-15
R
false
false
1,400
r
tweets <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-06-15/tweets.csv') library(tidyverse) library(tidytext) library(textdata) library(wordcloud) library(wordcloud2) library(dplyr) library(patchwork) library(ggimage) library(cowplot) most_traction <- tweets %>% as_tibble() %>% unnest_tokens(word, content) #remove words that aren't useful like "to" and "of" most_traction <- most_traction %>% anti_join(stop_words) #most common words in titles test <- most_traction %>% count(word, sort = TRUE) test %>% filter(n>500) %>% ggplot(aes(n,word)) +geom_col() #get joy lexicon nrc_joy <- get_sentiments("nrc") %>% filter(sentiment == "joy") #positive words joy_words <- most_traction %>%inner_join(nrc_joy) %>% count(word, sort = TRUE) nrc_disgust <- get_sentiments("nrc") %>% filter(sentiment == "disgust") #disgust words disgust_words <- most_traction %>%inner_join(nrc_disgust) %>% count(word, sort = TRUE) wordcloud2(joy_words,shape ="circle", color = "goldenrod",size=0.5,backgroundColor = "bisque") wordcloud2(disgust_words,shape ="circle", color = "tan",size=0.5,backgroundColor = "bisque") ggplot()+draw_image( image = "/Users/thivina/Documents/TidyTuesday-06-15/img/joy.png", x=0,y=0,width = 1, height=0.6) +draw_image( image = "/Users/thivina/Documents/TidyTuesday-06-15/img/disgust.png", x=0,y=0.5,width = 1, height=0.6)
#' @param ope the number of observations per 'epoch'. For convenience of #' interpretation, The Sharpe ratio is typically quoted in 'annualized' #' units for some epoch, that is, 'per square root epoch', though returns #' are observed at a frequency of \code{ope} per epoch. #' The default value is 1, meaning the code will not attempt to guess #' what the observation frequency is, and no annualization adjustments #' will be made. #' @seealso \code{\link{reannualize}}
/man-roxygen/param-ope.R
no_license
shabbychef/SharpeR
R
false
false
486
r
#' @param ope the number of observations per 'epoch'. For convenience of #' interpretation, The Sharpe ratio is typically quoted in 'annualized' #' units for some epoch, that is, 'per square root epoch', though returns #' are observed at a frequency of \code{ope} per epoch. #' The default value is 1, meaning the code will not attempt to guess #' what the observation frequency is, and no annualization adjustments #' will be made. #' @seealso \code{\link{reannualize}}
env = read.csv('./data/tgp_utm_env_complete.csv') env = env[env$repeat_plot == 1, ] comm = read.csv('./data/tgp_comm_mat_all.csv') comm = comm[match(env$plot_yr, comm$plot.yr), ] ## drop species that don't occur comm = comm[ , colSums(comm) > 0 ] row.names(comm) = comm$plot.yr comm = comm[ , -1] comm_sqr = sqrt(comm) env$sr = rowSums(comm > 0) ## create explanatory modeling variables ## soil variables soil_vars = c("P","CA","MG","K","NA.","B","FE","MN","CU","ZN","AL") soil_vars = paste('log', soil_vars, sep='') soil_pca = princomp(scale(env[ , soil_vars])) soil_mat = as.data.frame(soil_pca$scores[ , 1:3]) ## rain variables env$rain2 = ifelse(is.na(env$rain2), 0, env$rain2) sum_rain = apply(env[ , paste('rain', 6:9, sep='')], 1, sum) win_rain = apply(env[ , paste('rain', c(10:12, 1), sep='')], 1, sum) spr_rain = apply(env[ , paste('rain', 2:5, sep='')], 1, sum) rain_mat = cbind(sum_rain, win_rain, spr_rain) ## management variables mang_vars = c('YrsOB', 'BP5Yrs', 'YrsSLB') mang_mat = env[ , mang_vars] ## site and year dummy variables plot_id = sort(unique(env$plot)) year_id = sort(unique(env$yr)) plot_mat = matrix(0, ncol=length(plot_id), nrow=nrow(env)) year_mat = matrix(0, ncol=length(year_id), nrow=nrow(env)) for(i in 1:nrow(env)) { plot_mat[i, match(env$plot[i], plot_id)] = 1 year_mat[i, match(env$yr[i], year_id)] = 1 } ## drop first columns so no singular variables in models plot_mat = plot_mat[ , -1] year_mat = year_mat[ , -1]
/scripts/tgp_repeat_data_import.R
no_license
mcglinnlab/tgp_management
R
false
false
1,473
r
env = read.csv('./data/tgp_utm_env_complete.csv') env = env[env$repeat_plot == 1, ] comm = read.csv('./data/tgp_comm_mat_all.csv') comm = comm[match(env$plot_yr, comm$plot.yr), ] ## drop species that don't occur comm = comm[ , colSums(comm) > 0 ] row.names(comm) = comm$plot.yr comm = comm[ , -1] comm_sqr = sqrt(comm) env$sr = rowSums(comm > 0) ## create explanatory modeling variables ## soil variables soil_vars = c("P","CA","MG","K","NA.","B","FE","MN","CU","ZN","AL") soil_vars = paste('log', soil_vars, sep='') soil_pca = princomp(scale(env[ , soil_vars])) soil_mat = as.data.frame(soil_pca$scores[ , 1:3]) ## rain variables env$rain2 = ifelse(is.na(env$rain2), 0, env$rain2) sum_rain = apply(env[ , paste('rain', 6:9, sep='')], 1, sum) win_rain = apply(env[ , paste('rain', c(10:12, 1), sep='')], 1, sum) spr_rain = apply(env[ , paste('rain', 2:5, sep='')], 1, sum) rain_mat = cbind(sum_rain, win_rain, spr_rain) ## management variables mang_vars = c('YrsOB', 'BP5Yrs', 'YrsSLB') mang_mat = env[ , mang_vars] ## site and year dummy variables plot_id = sort(unique(env$plot)) year_id = sort(unique(env$yr)) plot_mat = matrix(0, ncol=length(plot_id), nrow=nrow(env)) year_mat = matrix(0, ncol=length(year_id), nrow=nrow(env)) for(i in 1:nrow(env)) { plot_mat[i, match(env$plot[i], plot_id)] = 1 year_mat[i, match(env$yr[i], year_id)] = 1 } ## drop first columns so no singular variables in models plot_mat = plot_mat[ , -1] year_mat = year_mat[ , -1]
# hw2 -- ISLR chapter 3 exercises (Auto and Carseats data sets)

## 8. Simple linear regression of mpg on horsepower
library(graphics)
library(MASS)
# install.packages("ISLR")
library(ISLR)

data(Auto)
summary(Auto)
# head(Auto)
# View(Auto)
# mydata <- read.csv(file = "/Users/wei/Desktop/Auto.csv")
# head(mydata)
# mydata$origin = as.numeric(mydata$origin)
# mydata$horsepower = as.numeric(mydata$horsepower)

lm.fit <- lm(mpg ~ horsepower, data = Auto)
summary(lm.fit)  # BUG FIX: was summary(fit); 'fit' was never defined

# 8.(a) iv. Predicted mpg at horsepower = 98, with confidence and
# prediction intervals
predict(lm.fit, newdata = data.frame(horsepower = 98), interval = "confidence")
predict(lm.fit, newdata = data.frame(horsepower = 98), interval = "prediction")

# 8.(b) Scatterplot with the fitted regression line
plot(Auto$horsepower, Auto$mpg,
     main = "Scatterplot", ylab = "mpg", xlab = "horsepower")
abline(lm.fit, lwd = 3, col = "red")

# 8.(c) Diagnostic plots for the simple regression
par(mfrow = c(2, 2))
plot(lm.fit)

## 9.(a) Scatterplot matrix of all variables
plot(Auto)

# 9.(b) Correlation matrix, excluding the qualitative 'name' column
cor(Auto[, !(names(Auto) == "name")])

# 9.(c) Multiple regression of mpg on all predictors except name
lm.fit <- lm(mpg ~ . - name, data = Auto)
summary(lm.fit)

# 9.(d) Diagnostic plots for the multiple regression
par(mfrow = c(2, 2))
plot(lm.fit)

# 9.(e) All pairwise interaction terms between the predictors
lm.fit.inter <- lm(mpg ~ (. - name) * (. - name), data = Auto)
summary(lm.fit.inter)

# 9.(f) Transformations of the predictors
Auto_2 <- Auto
head(Auto_2)
# Square each numeric predictor (columns 2:8)
Auto_2[2:8] <- lapply(Auto_2[2:8], function(x) x^2)
colnames(Auto_2) <- c("mpg", "cylinders^2", "displacement^2", "horsepower^2",
                      "weight^2", "acceleration^2", "year^2", "origin^2", "name")
head(Auto_2)
lm.fit <- lm(mpg ~ (. - name), data = Auto_2)
summary(lm.fit)

Auto_3 <- Auto
# Take the log of each numeric predictor
Auto_3[2:8] <- lapply(Auto_3[2:8], function(x) log(x))
colnames(Auto_3) <- c("mpg", "log(cylinders)", "log(displacement)",
                      "log(horsepower)", "log(weight)", "log(acceleration)",
                      "log(year)", "log(origin)", "name")
head(Auto_3)
lm.fit <- lm(mpg ~ (. - name), data = Auto_3)
summary(lm.fit)

Auto_4 <- Auto
# Take the square root of each numeric predictor
Auto_4[2:8] <- lapply(Auto_4[2:8], function(x) sqrt(x))
colnames(Auto_4) <- c("mpg", "sqrt(cylinders)", "sqrt(displacement)",
                      "sqrt(horsepower)", "sqrt(weight)", "sqrt(acceleration)",
                      "sqrt(year)", "sqrt(origin)", "name")
head(Auto_4)
lm.fit <- lm(mpg ~ (. - name), data = Auto_4)
summary(lm.fit)

# 10.(a) Carseats: Sales on Price, Urban and US
# install.packages("ISLR")
library(ISLR)
summary(Carseats)
head(Carseats)
lm.fit <- lm(Sales ~ Price + Urban + US, data = Carseats)
summary(lm.fit)

# 10.(e) Drop the insignificant Urban predictor
lm.fit <- lm(Sales ~ Price + US, data = Carseats)
summary(lm.fit)

# 10.(g) Confidence intervals for the coefficients
confint(lm.fit)

# 10.(h) Diagnostic plots
plot(lm.fit)
/hw2.R
no_license
wwweiwei/Statistical-Learning
R
false
false
2,134
r
# hw2 -- ISLR chapter 3 exercises (Auto and Carseats data sets)

## 8. Simple linear regression of mpg on horsepower
library(graphics)
library(MASS)
# install.packages("ISLR")
library(ISLR)

data(Auto)
summary(Auto)
# head(Auto)
# View(Auto)
# mydata <- read.csv(file = "/Users/wei/Desktop/Auto.csv")
# head(mydata)
# mydata$origin = as.numeric(mydata$origin)
# mydata$horsepower = as.numeric(mydata$horsepower)

lm.fit <- lm(mpg ~ horsepower, data = Auto)
summary(lm.fit)  # BUG FIX: was summary(fit); 'fit' was never defined

# 8.(a) iv. Predicted mpg at horsepower = 98, with confidence and
# prediction intervals
predict(lm.fit, newdata = data.frame(horsepower = 98), interval = "confidence")
predict(lm.fit, newdata = data.frame(horsepower = 98), interval = "prediction")

# 8.(b) Scatterplot with the fitted regression line
plot(Auto$horsepower, Auto$mpg,
     main = "Scatterplot", ylab = "mpg", xlab = "horsepower")
abline(lm.fit, lwd = 3, col = "red")

# 8.(c) Diagnostic plots for the simple regression
par(mfrow = c(2, 2))
plot(lm.fit)

## 9.(a) Scatterplot matrix of all variables
plot(Auto)

# 9.(b) Correlation matrix, excluding the qualitative 'name' column
cor(Auto[, !(names(Auto) == "name")])

# 9.(c) Multiple regression of mpg on all predictors except name
lm.fit <- lm(mpg ~ . - name, data = Auto)
summary(lm.fit)

# 9.(d) Diagnostic plots for the multiple regression
par(mfrow = c(2, 2))
plot(lm.fit)

# 9.(e) All pairwise interaction terms between the predictors
lm.fit.inter <- lm(mpg ~ (. - name) * (. - name), data = Auto)
summary(lm.fit.inter)

# 9.(f) Transformations of the predictors
Auto_2 <- Auto
head(Auto_2)
# Square each numeric predictor (columns 2:8)
Auto_2[2:8] <- lapply(Auto_2[2:8], function(x) x^2)
colnames(Auto_2) <- c("mpg", "cylinders^2", "displacement^2", "horsepower^2",
                      "weight^2", "acceleration^2", "year^2", "origin^2", "name")
head(Auto_2)
lm.fit <- lm(mpg ~ (. - name), data = Auto_2)
summary(lm.fit)

Auto_3 <- Auto
# Take the log of each numeric predictor
Auto_3[2:8] <- lapply(Auto_3[2:8], function(x) log(x))
colnames(Auto_3) <- c("mpg", "log(cylinders)", "log(displacement)",
                      "log(horsepower)", "log(weight)", "log(acceleration)",
                      "log(year)", "log(origin)", "name")
head(Auto_3)
lm.fit <- lm(mpg ~ (. - name), data = Auto_3)
summary(lm.fit)

Auto_4 <- Auto
# Take the square root of each numeric predictor
Auto_4[2:8] <- lapply(Auto_4[2:8], function(x) sqrt(x))
colnames(Auto_4) <- c("mpg", "sqrt(cylinders)", "sqrt(displacement)",
                      "sqrt(horsepower)", "sqrt(weight)", "sqrt(acceleration)",
                      "sqrt(year)", "sqrt(origin)", "name")
head(Auto_4)
lm.fit <- lm(mpg ~ (. - name), data = Auto_4)
summary(lm.fit)

# 10.(a) Carseats: Sales on Price, Urban and US
# install.packages("ISLR")
library(ISLR)
summary(Carseats)
head(Carseats)
lm.fit <- lm(Sales ~ Price + Urban + US, data = Carseats)
summary(lm.fit)

# 10.(e) Drop the insignificant Urban predictor
lm.fit <- lm(Sales ~ Price + US, data = Carseats)
summary(lm.fit)

# 10.(g) Confidence intervals for the coefficients
confint(lm.fit)

# 10.(h) Diagnostic plots
plot(lm.fit)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/SLR_prediction_vis.R \name{SLR_prediction_vis.default} \alias{SLR_prediction_vis.default} \title{SLR prediction visualization default method} \usage{ \method{SLR_prediction_vis}{default}(y) } \arguments{ \item{y}{an object of a wrong class} } \description{ SLR prediction visualization default method } \author{ Emanuel Sommer }
/man/SLR_prediction_vis.default.Rd
no_license
EmanuelSommer/SLRprediction
R
false
true
407
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/SLR_prediction_vis.R \name{SLR_prediction_vis.default} \alias{SLR_prediction_vis.default} \title{SLR prediction visualization default method} \usage{ \method{SLR_prediction_vis}{default}(y) } \arguments{ \item{y}{an object of a wrong class} } \description{ SLR prediction visualization default method } \author{ Emanuel Sommer }
library(dplyr)
library(ggplot2)
library(nloptr)
library(purrr)
library(tidyr)

# MeanVariance ------------------------------------------------------------

# Tools for computing mean variance

# Convert a correlation plus two variances into the implied covariance.
cor2cov <- function(cor, v1, v2) {
  cor * (sqrt(v1) * sqrt(v2))
}

# Variance of a two-asset portfolio with weight w1 on asset 1
# (asset 2 receives the remainder, w2 = 1 - w1).
portvar <- function(w1, m1, m2, v1, v2, covar) {
  w2 <- 1 - w1
  w1^2 * v1 + w2^2 * v2 + 2 * w1 * w2 * covar
}

# # TODO: check if this derivative is right
# portvar_deriv <- function(w1, m1, m2, v1, v2, covar)
# {
#   return(100*(2*w1*v1 - 2*(1-w1) - 4*w1*covar))
# }

# Mean-variance optimization: find the weight on asset 1 that minimizes
# portfolio variance subject to achieving at least the target return mu0.
# Returns the optimal portfolio volatility and the asset-1 weight.
m_v_opt <- function(mu0, m1, m2, v1, v2, covar) {
  fit <- nloptr(
    x0 = 0.5,
    eval_f = function(w) portvar(w, m1, m2, v1, v2, covar),
    eval_g_ineq = function(w) mu0 - w * m1 - (1 - w) * m2,
    opts = list(algorithm = "NLOPT_LN_COBYLA", ftol_abs = 1e-6),
    ub = 1
  )
  list(portvol = sqrt(fit$objective), w1 = fit$solution)
}

# Plots -------------------------------------------------------------------
# TODO: figure out how to animate this with gganimate?
# Baseline asset parameters (two identical, uncorrelated assets)
m1 = 0.05
m2 = 0.05
covar = 0
v1 = 0.09
v2 = 0.09

# Plot all two-asset portfolios (no leverage) and their efficient frontier.
# Returns a list with the raw portfolio data, the frontier data, and the plot.
genplot <- function(m1, m2, v1, v2, covar) {
  # All possible long-only weight combinations
  plotdata = tibble(w1 = seq(0, 1, length.out = 100)) %>%
    mutate(w2 = 1 - w1,
           portret = w1*m1 + w2*m2,
           portsd = sqrt(portvar(w1, m1, m2, v1, v2, covar)))

  # Efficient frontier: minimum-variance portfolio for each target return
  eff_portdata = tibble(portret = seq(min(m1, m2), max(m1, m2),
                                      length.out = 100)) %>%
    mutate(opts = map(.x = portret,
                      .f = {function(mu0) m_v_opt(mu0, m1, m2, v1, v2, covar)})) %>%
    mutate(portsd = map_dbl(opts, {function(x) pluck(x, "portvol")}),
           w1 = map_dbl(opts, {function(x) pluck(x, "w1")}))

  # Plot all possible values as a colored line, efficient frontier as dots
  plt = ggplot(plotdata, aes(x = portret*100, y = portsd*100)) +
    geom_line(aes(col = w1), size = 1) +
    geom_point(aes(col = w1), data = eff_portdata, size = 1.25) +
    scale_color_viridis_c("a1 Weight") +
    labs(x = "Portfolio Return", y = "Portfolio Volatility",
         title = "Accessible Assets with Portfolios: No Leverage") +
    theme_bw()

  return(list(data = plotdata, eff_data = eff_portdata, plot = plt))
}

# Generates plots with leverage: each lone asset levered 0-2x, plus the
# max-Sharpe (tangency) portfolio levered along its capital line.
genplot_leverage <- function(m1, m2, v1, v2, covar) {
  # Lone portfolios with leverage 0 to 2x
  sd1 = sqrt(v1)
  sd2 = sqrt(v2)
  sharpe_1 = m1/sd1
  sharpe_2 = m2/sd2
  volmax = max(sd1*2, sd2*2)
  plotdata = tibble(portvol = seq(0, volmax, length.out = 100),
                    portret1 = sharpe_1 * portvol,
                    portret2 = sharpe_2 * portvol)
  covmat = matrix(c(v1, covar, covar, v2), nrow = 2, ncol = 2, byrow = TRUE)

  # Max sharpe ratio portfolio with leverage (unnormalized tangency weights,
  # rescaled to sum to one)
  wstar = solve(covmat) %*% c(m1, m2)
  wstar = wstar/sum(wstar)
  ret_wstar = as.vector(t(wstar) %*% c(m1, m2))
  var_wstar = portvar(wstar[1,], m1, m2, v1, v2, covar)
  sd_wstar = sqrt(var_wstar)
  sharpe_wstar = ret_wstar/sd_wstar

  # Add combined portfolios with leverage
  plot_all = mutate(plotdata, portretstar = sharpe_wstar*portvol) %>%
    pivot_longer(cols = -portvol, values_to = "portret")

  ggplot(plot_all, aes(x = portvol*100, y = portret*100, col = name)) +
    geom_line(size = 1) +
    labs(x = "Portfolio Volatility", y = "Portfolio Return",
         title = "Accessible Assets with Portfolios: With Leverage") +
    coord_flip() +
    theme_bw()
}

# Baseline generated plots
# TODO: Stitch these together with cowplot??
# TODO: highlight the optimal weight
# NOTE(review): hard-coded absolute setwd() makes this script non-portable;
# consider running from the project root instead.
setwd("C:/Users/jason/Documents/jasonyang5.github.io/content/post/PortfolioSelection/")

# BUG FIX: pass the plot object explicitly to ggsave(). The original calls
# relied on last_plot(), which is only set when a plot is printed; here every
# plot is assigned (and genplot's plot sits inside a returned list), so the
# wrong plot -- or none at all -- could be written to disk.
plot2 = genplot(0.05, 0.03, 0.3^2, 0.15^2, 0)
ggsave("plot2.png", plot = plot2$plot)
plot3 = genplot(0.05, 0.03, 0.3^2, 0.15^2, cor2cov(0.9, 0.3^2, 0.15^2))
ggsave("plot3.png", plot = plot3$plot)
plot4 = genplot(0.05, 0.03, 0.3^2, 0.15^2, cor2cov(-0.9, 0.3^2, 0.15^2))
ggsave("plot4.png", plot = plot4$plot)
plot5 = genplot(0.05, -0.005, 0.3^2, 0.15^2, cor2cov(-0.5, 0.3^2, 0.15^2))
ggsave("plot5.png", plot = plot5$plot)

# Leverage plots
plot_lev2 = genplot_leverage(0.05, 0.03, 0.3^2, 0.15^2, 0)
ggsave("plot_lev2.png", plot = plot_lev2)
plot_lev3 = genplot_leverage(0.05, 0.03, 0.3^2, 0.15^2, cor2cov(0.9, 0.3^2, 0.15^2))
ggsave("plot_lev3.png", plot = plot_lev3)
plot_lev4 = genplot_leverage(0.05, 0.03, 0.3^2, 0.15^2, cor2cov(-0.9, 0.3^2, 0.15^2))
ggsave("plot_lev4.png", plot = plot_lev4)
plot_lev5 = genplot_leverage(0.05, -0.005, 0.3^2, 0.15^2, cor2cov(-0.5, 0.3^2, 0.15^2))
ggsave("plot_lev5.png", plot = plot_lev5)
/content/post/portfolioselection/portfolioselection.R
no_license
jasonyang5/jasonyang5.github.io
R
false
false
4,675
r
library(dplyr)
library(ggplot2)
library(nloptr)
library(purrr)
library(tidyr)

# MeanVariance ------------------------------------------------------------

# Tools for computing mean variance

# Convert a correlation plus two variances into the implied covariance.
cor2cov <- function(cor, v1, v2) {
  cor * (sqrt(v1) * sqrt(v2))
}

# Variance of a two-asset portfolio with weight w1 on asset 1
# (asset 2 receives the remainder, w2 = 1 - w1).
portvar <- function(w1, m1, m2, v1, v2, covar) {
  w2 <- 1 - w1
  w1^2 * v1 + w2^2 * v2 + 2 * w1 * w2 * covar
}

# # TODO: check if this derivative is right
# portvar_deriv <- function(w1, m1, m2, v1, v2, covar)
# {
#   return(100*(2*w1*v1 - 2*(1-w1) - 4*w1*covar))
# }

# Mean-variance optimization: find the weight on asset 1 that minimizes
# portfolio variance subject to achieving at least the target return mu0.
# Returns the optimal portfolio volatility and the asset-1 weight.
m_v_opt <- function(mu0, m1, m2, v1, v2, covar) {
  fit <- nloptr(
    x0 = 0.5,
    eval_f = function(w) portvar(w, m1, m2, v1, v2, covar),
    eval_g_ineq = function(w) mu0 - w * m1 - (1 - w) * m2,
    opts = list(algorithm = "NLOPT_LN_COBYLA", ftol_abs = 1e-6),
    ub = 1
  )
  list(portvol = sqrt(fit$objective), w1 = fit$solution)
}

# Plots -------------------------------------------------------------------
# TODO: figure out how to animate this with gganimate?
# Baseline asset parameters (two identical, uncorrelated assets)
m1 = 0.05
m2 = 0.05
covar = 0
v1 = 0.09
v2 = 0.09

# Plot all two-asset portfolios (no leverage) and their efficient frontier.
# Returns a list with the raw portfolio data, the frontier data, and the plot.
genplot <- function(m1, m2, v1, v2, covar) {
  # All possible long-only weight combinations
  plotdata = tibble(w1 = seq(0, 1, length.out = 100)) %>%
    mutate(w2 = 1 - w1,
           portret = w1*m1 + w2*m2,
           portsd = sqrt(portvar(w1, m1, m2, v1, v2, covar)))

  # Efficient frontier: minimum-variance portfolio for each target return
  eff_portdata = tibble(portret = seq(min(m1, m2), max(m1, m2),
                                      length.out = 100)) %>%
    mutate(opts = map(.x = portret,
                      .f = {function(mu0) m_v_opt(mu0, m1, m2, v1, v2, covar)})) %>%
    mutate(portsd = map_dbl(opts, {function(x) pluck(x, "portvol")}),
           w1 = map_dbl(opts, {function(x) pluck(x, "w1")}))

  # Plot all possible values as a colored line, efficient frontier as dots
  plt = ggplot(plotdata, aes(x = portret*100, y = portsd*100)) +
    geom_line(aes(col = w1), size = 1) +
    geom_point(aes(col = w1), data = eff_portdata, size = 1.25) +
    scale_color_viridis_c("a1 Weight") +
    labs(x = "Portfolio Return", y = "Portfolio Volatility",
         title = "Accessible Assets with Portfolios: No Leverage") +
    theme_bw()

  return(list(data = plotdata, eff_data = eff_portdata, plot = plt))
}

# Generates plots with leverage: each lone asset levered 0-2x, plus the
# max-Sharpe (tangency) portfolio levered along its capital line.
genplot_leverage <- function(m1, m2, v1, v2, covar) {
  # Lone portfolios with leverage 0 to 2x
  sd1 = sqrt(v1)
  sd2 = sqrt(v2)
  sharpe_1 = m1/sd1
  sharpe_2 = m2/sd2
  volmax = max(sd1*2, sd2*2)
  plotdata = tibble(portvol = seq(0, volmax, length.out = 100),
                    portret1 = sharpe_1 * portvol,
                    portret2 = sharpe_2 * portvol)
  covmat = matrix(c(v1, covar, covar, v2), nrow = 2, ncol = 2, byrow = TRUE)

  # Max sharpe ratio portfolio with leverage (unnormalized tangency weights,
  # rescaled to sum to one)
  wstar = solve(covmat) %*% c(m1, m2)
  wstar = wstar/sum(wstar)
  ret_wstar = as.vector(t(wstar) %*% c(m1, m2))
  var_wstar = portvar(wstar[1,], m1, m2, v1, v2, covar)
  sd_wstar = sqrt(var_wstar)
  sharpe_wstar = ret_wstar/sd_wstar

  # Add combined portfolios with leverage
  plot_all = mutate(plotdata, portretstar = sharpe_wstar*portvol) %>%
    pivot_longer(cols = -portvol, values_to = "portret")

  ggplot(plot_all, aes(x = portvol*100, y = portret*100, col = name)) +
    geom_line(size = 1) +
    labs(x = "Portfolio Volatility", y = "Portfolio Return",
         title = "Accessible Assets with Portfolios: With Leverage") +
    coord_flip() +
    theme_bw()
}

# Baseline generated plots
# TODO: Stitch these together with cowplot??
# TODO: highlight the optimal weight
# NOTE(review): hard-coded absolute setwd() makes this script non-portable;
# consider running from the project root instead.
setwd("C:/Users/jason/Documents/jasonyang5.github.io/content/post/PortfolioSelection/")

# BUG FIX: pass the plot object explicitly to ggsave(). The original calls
# relied on last_plot(), which is only set when a plot is printed; here every
# plot is assigned (and genplot's plot sits inside a returned list), so the
# wrong plot -- or none at all -- could be written to disk.
plot2 = genplot(0.05, 0.03, 0.3^2, 0.15^2, 0)
ggsave("plot2.png", plot = plot2$plot)
plot3 = genplot(0.05, 0.03, 0.3^2, 0.15^2, cor2cov(0.9, 0.3^2, 0.15^2))
ggsave("plot3.png", plot = plot3$plot)
plot4 = genplot(0.05, 0.03, 0.3^2, 0.15^2, cor2cov(-0.9, 0.3^2, 0.15^2))
ggsave("plot4.png", plot = plot4$plot)
plot5 = genplot(0.05, -0.005, 0.3^2, 0.15^2, cor2cov(-0.5, 0.3^2, 0.15^2))
ggsave("plot5.png", plot = plot5$plot)

# Leverage plots
plot_lev2 = genplot_leverage(0.05, 0.03, 0.3^2, 0.15^2, 0)
ggsave("plot_lev2.png", plot = plot_lev2)
plot_lev3 = genplot_leverage(0.05, 0.03, 0.3^2, 0.15^2, cor2cov(0.9, 0.3^2, 0.15^2))
ggsave("plot_lev3.png", plot = plot_lev3)
plot_lev4 = genplot_leverage(0.05, 0.03, 0.3^2, 0.15^2, cor2cov(-0.9, 0.3^2, 0.15^2))
ggsave("plot_lev4.png", plot = plot_lev4)
plot_lev5 = genplot_leverage(0.05, -0.005, 0.3^2, 0.15^2, cor2cov(-0.5, 0.3^2, 0.15^2))
ggsave("plot_lev5.png", plot = plot_lev5)
#' Calculate the weighted mean of fitted values for various levels of #' random effect terms. #' #' \code{REimpact} calculates the average predicted value for each row of a #' new data frame across the distribution of \code{\link{expectedRank}} for a #' merMod object. This allows the user to make meaningful comparisons about the #' influence of random effect terms on the scale of the response variable, #' for user-defined inputs, and accounting for the variability in grouping terms. #' #' The function predicts the response at every level in the random effect term #' specified by the user. Then, the expected rank of each group level is binned #' to the number of bins specified by the user. Finally, a weighted mean of the #' fitted value for all observations in each bin of the expected ranks is #' calculated using the inverse of the variance as the weight -- so that less #' precise estimates are downweighted in the calculation of the mean for the bin. #' Finally, a standard error for the bin mean is calculated. #' #' @param merMod An object of class merMod #' #' @param newdata a data frame of observations to calculate group-level differences #' for #' #' @param groupFctr The name of the grouping factor over which the random #' coefficient of interest varies. This is the variable to the right of the #' pipe, \code{|}, in the [g]lmer formula. This parameter is optional, if not #' specified, it will perform the calculation for the first effect listed #' by \code{ranef}. #' #' @param term The name of the random coefficient of interest. This is the #' variable to the left of the pipe, \code{|}, in the [g]lmer formula. Partial #' matching is attempted on the intercept term so the following character #' strings will all return rankings based on the intercept (\emph{provided that #' they do not match the name of another random coefficient for that factor}): #' \code{c("(Intercept)", "Int", "intercep", ...)}. 
#' #' @param breaks an integer representing the number of bins to divide the group #' effects into, the default is 3; alternatively it can specify breaks from 0-100 #' for how to cut the expected rank distribution #' #' @param ... additional arguments to pass to \code{\link{predictInterval}} #' #' @return A data.frame with all unique combinations of the number of cases, rows #' in the newdata element, and number of bins: #' \describe{ #' \item{case}{The row number of the observation from newdata.} #' \item{bin}{The ranking bin for the expected rank, the higher the bin number, #' the greater the expected rank of the groups in that bin.} #' \item{AvgFitWght}{The weighted mean of the fitted values for case i in bin k} #' \item{AvgFitWghtSE}{The standard deviation of the mean of the fitted values #' for case i in bin k.} #' \item{nobs}{The number of group effects contained in that bin.} #' } #' #' @details This function uses the formula for variance of a weighted mean #' recommended by Cochran (1977). #' #' @references #' Gatz, DF and Smith, L. The Standard Error of a Weighted Mean Concentration. #' I. Bootstrapping vs other methods. \emph{Atmospheric Environment}. #' 1995;11(2)1185-1193. Available at #' \url{https://www.sciencedirect.com/science/article/pii/135223109400210C} #' #' Cochran, WG. 1977. Sampling Techniques (3rd Edition). Wiley, New York. 
#'
#' @seealso \code{\link{expectedRank}}, \code{\link{predictInterval}}
#'
#' @examples
#' \donttest{
#' #For a one-level random intercept model
#' m1 <- lmer(Reaction ~ Days + (1 | Subject), sleepstudy)
#' m1.er <- REimpact(m1, newdata = sleepstudy[1, ], breaks = 2)
#' #For a one-level random intercept model with multiple random terms
#' m2 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
#' #ranked by the random slope on Days
#' m2.er1 <- REimpact(m2, newdata = sleepstudy[1, ],
#'            groupFctr = "Subject", term="Days")
#' #ranked by the random intercept
#' m2.er2 <- REimpact(m2, newdata = sleepstudy[1, ],
#'            groupFctr = "Subject", term="int")
#'
#' # You can also pass additional arguments to predictInterval through REimpact
#' g1 <- lmer(y ~ lectage + studage + (1|d) + (1|s), data=InstEval)
#' zed <- REimpact(g1, newdata = InstEval[9:12, ], groupFctr = "d", n.sims = 50,
#'                 include.resid.var = TRUE)
#' zed2 <- REimpact(g1, newdata = InstEval[9:12, ], groupFctr = "s", n.sims = 50,
#'                  include.resid.var = TRUE)
#' zed3 <- REimpact(g1, newdata = InstEval[9:12, ], groupFctr = "d", breaks = 5,
#'                  n.sims = 50, include.resid.var = TRUE)
#' }
#' @export
REimpact <- function(merMod, newdata, groupFctr=NULL, term = NULL, breaks = 3,
                     ...){
  # BUG FIX: test is.null() instead of missing(). The documented default is
  # groupFctr = NULL, but with missing() an explicitly passed NULL skipped
  # this fallback and failed downstream in merMod@frame[, groupFctr].
  if(is.null(groupFctr)){
    groupFctr <- names(ranef(merMod))[1]
  }
  lvls <- unique(merMod@frame[, groupFctr])
  # Replicate each newdata row once per group level so we can predict the
  # response at every level of the grouping factor
  zed <- as.data.frame(lapply(newdata, rep, length(lvls)))
  zed[, groupFctr] <- rep(lvls, each = nrow(newdata))
  zed[, "case"] <- rep(seq(1, nrow(newdata)), times = length(lvls))
  outs1 <- cbind(zed, predictInterval(merMod, newdata = zed, ...))
  # Interval width serves as the (inverse) precision weight below
  outs1$var <- outs1$upr - outs1$lwr
  outs1$lwr <- NULL; outs1$upr <- NULL
  ranks <- expectedRank(merMod, groupFctr = groupFctr, term = term)
  ranks <- ranks[, c(2, 7)]  # keep groupLevel and pctER
  outs1 <- merge(ranks, outs1, by.x = "groupLevel", by.y = groupFctr); rm(ranks)
  # Computes the variance of a weighted mean following Cochran 1977 definition
  weighted.var.se <- function(x, w, na.rm=FALSE)
  {
    if (na.rm) { w <- w[i <- !is.na(x)]; x <- x[i] }
    n = length(w)
    xWbar = weighted.mean(x,w,na.rm=na.rm)
    wbar = mean(w)
    out = n/((n-1)*sum(w)^2)*(sum((w*x-wbar*xWbar)^2)-2*xWbar*sum((w-wbar)*(w*x-wbar*xWbar))+xWbar^2*sum((w-wbar)^2))
    return(out)
  }
  # Bin the expected-rank percentiles into the requested number of bins
  outs1$bin <- cut(outs1$pctER, breaks = breaks, labels = FALSE,
                   include.lowest = TRUE)
  # Per (case, bin): precision-weighted mean fit, its SE, and the bin size
  bySum <- function(x){
    AvgFit <- weighted.mean(x$fit, 1/x$var)
    AvgFitSE <- weighted.var.se(x$fit, 1/x$var)
    nobs <- length(x$fit)
    return(c(AvgFit, AvgFitSE, nobs))
  }
  outs1 <- outs1[order(outs1$case, outs1$bin),]
  wMeans <- by(outs1, INDICES = list(outs1$case, outs1$bin), bySum)
  ids <- expand.grid(unique(outs1$case), unique(outs1$bin))
  wMeans <- cbind(ids, do.call(rbind, wMeans))
  names(wMeans) <- c("case", "bin", "AvgFit", "AvgFitSE", "nobs")
  return(wMeans)
}
/R/merSubstEff.R
no_license
jknowles/merTools
R
false
false
6,458
r
#' Calculate the weighted mean of fitted values for various levels of #' random effect terms. #' #' \code{REimpact} calculates the average predicted value for each row of a #' new data frame across the distribution of \code{\link{expectedRank}} for a #' merMod object. This allows the user to make meaningful comparisons about the #' influence of random effect terms on the scale of the response variable, #' for user-defined inputs, and accounting for the variability in grouping terms. #' #' The function predicts the response at every level in the random effect term #' specified by the user. Then, the expected rank of each group level is binned #' to the number of bins specified by the user. Finally, a weighted mean of the #' fitted value for all observations in each bin of the expected ranks is #' calculated using the inverse of the variance as the weight -- so that less #' precise estimates are downweighted in the calculation of the mean for the bin. #' Finally, a standard error for the bin mean is calculated. #' #' @param merMod An object of class merMod #' #' @param newdata a data frame of observations to calculate group-level differences #' for #' #' @param groupFctr The name of the grouping factor over which the random #' coefficient of interest varies. This is the variable to the right of the #' pipe, \code{|}, in the [g]lmer formula. This parameter is optional, if not #' specified, it will perform the calculation for the first effect listed #' by \code{ranef}. #' #' @param term The name of the random coefficient of interest. This is the #' variable to the left of the pipe, \code{|}, in the [g]lmer formula. Partial #' matching is attempted on the intercept term so the following character #' strings will all return rankings based on the intercept (\emph{provided that #' they do not match the name of another random coefficient for that factor}): #' \code{c("(Intercept)", "Int", "intercep", ...)}. 
#' #' @param breaks an integer representing the number of bins to divide the group #' effects into, the default is 3; alternatively it can specify breaks from 0-100 #' for how to cut the expected rank distribution #' #' @param ... additional arguments to pass to \code{\link{predictInterval}} #' #' @return A data.frame with all unique combinations of the number of cases, rows #' in the newdata element, and number of bins: #' \describe{ #' \item{case}{The row number of the observation from newdata.} #' \item{bin}{The ranking bin for the expected rank, the higher the bin number, #' the greater the expected rank of the groups in that bin.} #' \item{AvgFitWght}{The weighted mean of the fitted values for case i in bin k} #' \item{AvgFitWghtSE}{The standard deviation of the mean of the fitted values #' for case i in bin k.} #' \item{nobs}{The number of group effects contained in that bin.} #' } #' #' @details This function uses the formula for variance of a weighted mean #' recommended by Cochran (1977). #' #' @references #' Gatz, DF and Smith, L. The Standard Error of a Weighted Mean Concentration. #' I. Bootstrapping vs other methods. \emph{Atmospheric Environment}. #' 1995;11(2)1185-1193. Available at #' \url{https://www.sciencedirect.com/science/article/pii/135223109400210C} #' #' Cochran, WG. 1977. Sampling Techniques (3rd Edition). Wiley, New York. 
#'
#' @seealso \code{\link{expectedRank}}, \code{\link{predictInterval}}
#'
#' @examples
#' \donttest{
#' #For a one-level random intercept model
#' m1 <- lmer(Reaction ~ Days + (1 | Subject), sleepstudy)
#' m1.er <- REimpact(m1, newdata = sleepstudy[1, ], breaks = 2)
#' #For a one-level random intercept model with multiple random terms
#' m2 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
#' #ranked by the random slope on Days
#' m2.er1 <- REimpact(m2, newdata = sleepstudy[1, ],
#'            groupFctr = "Subject", term="Days")
#' #ranked by the random intercept
#' m2.er2 <- REimpact(m2, newdata = sleepstudy[1, ],
#'            groupFctr = "Subject", term="int")
#'
#' # You can also pass additional arguments to predictInterval through REimpact
#' g1 <- lmer(y ~ lectage + studage + (1|d) + (1|s), data=InstEval)
#' zed <- REimpact(g1, newdata = InstEval[9:12, ], groupFctr = "d", n.sims = 50,
#'                 include.resid.var = TRUE)
#' zed2 <- REimpact(g1, newdata = InstEval[9:12, ], groupFctr = "s", n.sims = 50,
#'                  include.resid.var = TRUE)
#' zed3 <- REimpact(g1, newdata = InstEval[9:12, ], groupFctr = "d", breaks = 5,
#'                  n.sims = 50, include.resid.var = TRUE)
#' }
#' @export
REimpact <- function(merMod, newdata, groupFctr=NULL, term = NULL, breaks = 3,
                     ...){
  # BUG FIX: test is.null() instead of missing(). The documented default is
  # groupFctr = NULL, but with missing() an explicitly passed NULL skipped
  # this fallback and failed downstream in merMod@frame[, groupFctr].
  if(is.null(groupFctr)){
    groupFctr <- names(ranef(merMod))[1]
  }
  lvls <- unique(merMod@frame[, groupFctr])
  # Replicate each newdata row once per group level so we can predict the
  # response at every level of the grouping factor
  zed <- as.data.frame(lapply(newdata, rep, length(lvls)))
  zed[, groupFctr] <- rep(lvls, each = nrow(newdata))
  zed[, "case"] <- rep(seq(1, nrow(newdata)), times = length(lvls))
  outs1 <- cbind(zed, predictInterval(merMod, newdata = zed, ...))
  # Interval width serves as the (inverse) precision weight below
  outs1$var <- outs1$upr - outs1$lwr
  outs1$lwr <- NULL; outs1$upr <- NULL
  ranks <- expectedRank(merMod, groupFctr = groupFctr, term = term)
  ranks <- ranks[, c(2, 7)]  # keep groupLevel and pctER
  outs1 <- merge(ranks, outs1, by.x = "groupLevel", by.y = groupFctr); rm(ranks)
  # Computes the variance of a weighted mean following Cochran 1977 definition
  weighted.var.se <- function(x, w, na.rm=FALSE)
  {
    if (na.rm) { w <- w[i <- !is.na(x)]; x <- x[i] }
    n = length(w)
    xWbar = weighted.mean(x,w,na.rm=na.rm)
    wbar = mean(w)
    out = n/((n-1)*sum(w)^2)*(sum((w*x-wbar*xWbar)^2)-2*xWbar*sum((w-wbar)*(w*x-wbar*xWbar))+xWbar^2*sum((w-wbar)^2))
    return(out)
  }
  # Bin the expected-rank percentiles into the requested number of bins
  outs1$bin <- cut(outs1$pctER, breaks = breaks, labels = FALSE,
                   include.lowest = TRUE)
  # Per (case, bin): precision-weighted mean fit, its SE, and the bin size
  bySum <- function(x){
    AvgFit <- weighted.mean(x$fit, 1/x$var)
    AvgFitSE <- weighted.var.se(x$fit, 1/x$var)
    nobs <- length(x$fit)
    return(c(AvgFit, AvgFitSE, nobs))
  }
  outs1 <- outs1[order(outs1$case, outs1$bin),]
  wMeans <- by(outs1, INDICES = list(outs1$case, outs1$bin), bySum)
  ids <- expand.grid(unique(outs1$case), unique(outs1$bin))
  wMeans <- cbind(ids, do.call(rbind, wMeans))
  names(wMeans) <- c("case", "bin", "AvgFit", "AvgFitSE", "nobs")
  return(wMeans)
}
#' Overview. Linear Group Fixed Effects #' #' The package uses the Method of Alternating Projections to estimate linear #' models with multiple group fixed effects. A generalization of the within #' estimator. It supports IV-estimation with multiple endogenous variables via #' 2SLS, with conditional F statistics for detection of weak instruments. It is #' thread-parallelized and intended for large problems. A method for correcting #' limited mobility bias is also included. #' #' #' This package is intended for linear models with multiple group fixed #' effects, i.e. with 2 or more factors with a large number of levels. It #' performs similar functions as \code{\link[stats]{lm}}, but it uses a special #' method for projecting out multiple group fixed effects from the normal #' equations, hence it is faster. It is a generalization of the within #' estimator. This may be required if the groups have high cardinality (many #' levels), resulting in tens or hundreds of thousands of dummy variables. It #' is also useful if one only wants to control for the group effects, without #' actually estimating them. The package may optionally compute standard #' errors for the group effects by bootstrapping, but this is a very time- and #' memory-consuming process compared to finding the point estimates. If you #' only have a single huge factor, the package \pkg{plm} is probably better #' suited. If your factors don't have thousands of levels, #' \code{\link[stats]{lm}} or other packages are probably better suited. #' \pkg{lfe} is designed to produce the same results as \code{\link[stats]{lm}} #' will do if run with the full set of dummies. #' #' Projecting out interactions between continuous covariates and factors is #' supported. I.e. individual slopes, not only individual intercepts. Multiple #' left hand sides are supported. #' #' The estimation is done in two steps. 
First the other coefficients are #' estimated with the function \code{\link{felm}} by centering on all the group #' means, followed by an OLS (similar to lm). Then the group effects are #' extracted (if needed) with the function \code{\link{getfe}}. This method is #' described by \cite{Gaure (2013)}, but also appears in \cite{Guimaraes and #' Portugal (2010)}, disguised as the Gauss-Seidel algorithm. #' #' There's also a function \code{\link{demeanlist}} which just does the #' centering on an arbitrary matrix or data frame, and there's a function #' \code{\link{compfactor}} which computes the connected components which are #' used for interpreting the group effects when there are only two factors (see #' the Abowd et al references), they are also returned by \code{\link{getfe}}. #' #' For those who study the correlation between the fixed effects, like in #' \cite{Abowd et al. (1999)}, there are functions \code{\link{bccorr}} and #' \code{\link{fevcov}} for computing limited mobility bias corrected #' correlations and variances with the method described in \cite{Gaure #' (2014b)}. #' #' Instrumental variable estimations are supported with 2SLS. Conditional F #' statistics for testing reduced rank weak instruments as in \cite{Sanderson #' and Windmeijer (2015)} are available in \code{\link{condfstat}}. Joint #' significance testing of coefficients is available in \code{\link{waldtest}}. #' #' The centering on the means is done with a tolerance which is set by #' \code{options(lfe.eps=1e-8)} (the default). This is a somewhat conservative #' tolerance, in many cases I'd guess \code{1e-6} may be sufficient. This may #' speed up the centering. In the other direction, setting #' \code{options(lfe.eps=0)} will provide maximum accuracy at the cost of #' computing time and warnings about convergence failure. #' #' The package is threaded, that is, it may use more than one cpu. 
The number #' of threads is fetched upon loading the package from the environment variable #' \env{LFE_THREADS}, \env{OMP_THREAD_LIMIT}, \env{OMP_NUM_THREADS} or #' \env{NUMBER_OF_PROCESSORS} (for Windows), and stored by #' \code{options(lfe.threads=n)}. This option can be changed prior to calling #' \code{\link{felm}}, if so desired. Note that, typically, \pkg{lfe} is #' limited by memory bandwidth, not cpu speed, thus fast memory and large cache #' is more important than clock frequency. It is therefore also not always true #' that running on all available cores is much better than running on half of #' them. #' #' Threading is only done for the centering; the extraction of the group #' effects is not threaded. The default method for extracting the group #' coefficients is the iterative Kaczmarz-method, its tolerance is also the #' \code{lfe.eps} option. For some datasets the Kaczmarz-method is converging #' very slowly, in this case it may be replaced with a conjugate gradient #' method by setting the option \code{options(lfe.usecg=TRUE)}. Various #' time-consuming parts of \pkg{lfe} may print progress reports, the minimum #' interval in seconds is \code{options(lfe.pint=1800)}. #' #' The package has been tested on datasets with approx 20,000,000 observations #' with 15 covariates and approx 2,300,000 and 270,000 group levels (the #' \code{\link{felm}} took about 50 minutes on 8 cpus, the \code{\link{getfe}} #' takes 5 minutes). Though, beware that not only the size of the dataset #' matters, but also its structure, as demonstrated by \cite{Gaure (2014a)}. #' #' The package will work with any number of grouping factors, but if more than #' two, their interpretation is in general not well understood, i.e. one should #' make sure that the group coefficients are estimable. A discussion of #' estimability, the algorithm used, and convergence rate are available in #' vignettes, as well as in the published papers in the citation list #' (\code{citation('lfe')}). 
#' #' In the exec-directory there is a perl-script \code{lfescript} which is used #' at the author's site for automated creation of R-scripts from a simple #' specification file. The format is documented in \code{doc/lfeguide.txt}. #' #' \pkg{lfe} is similar in function, though not in method, to the Stata modules #' \code{a2reg} and \code{felsdvreg}. The method is very similar to the one in #' the Stata module \code{reghdfe}. #' #' @name lfe-package #' @aliases lfe-package lfe #' @docType package #' @references Abowd, J.M., F. Kramarz and D.N. Margolis (1999) \cite{High Wage #' Workers and High Wage Firms}, Econometrica 67 (1999), no. 2, 251--333. #' \url{http://dx.doi.org/10.1111/1468-0262.00020} #' #' Abowd, J.M., R. Creecy and F. Kramarz (2002) \cite{Computing Person and Firm #' Effects Using Linked Longitudinal Employer-Employee Data.} Technical Report #' TP-2002-06, U.S. Census Bureau. #' \url{http://lehd.did.census.gov/led/library/techpapers/tp-2002-06.pdf} #' #' Andrews, M., L. Gill, T. Schank and R. Upward (2008) \cite{High wage workers #' and low wage firms: negative assortative matching or limited mobility bias?} #' J.R. Stat. Soc.(A) 171(3), 673--697. #' \url{http://dx.doi.org/10.1111/j.1467-985X.2007.00533.x} #' #' Cornelissen, T. (2008) \cite{The stata command felsdvreg to fit a linear #' model with two high-dimensional fixed effects.} Stata Journal, #' 8(2):170--189, 2008. #' \url{http://econpapers.repec.org/RePEc:tsj:stataj:v:8:y:2008:i:2:p:170-189} #' #' Correia, S. (2014) \cite{REGHDFE: Stata module to perform linear or #' instrumental-variable regression absorbing any number of high-dimensional #' fixed effects}, Statistical Software Components, Boston College Department #' of Economics. \url{http://econpapers.repec.org/RePEc:boc:bocode:s457874} #' #' Croissant, Y. and G. Millo (2008) \cite{Panel Data Econometrics in R: The #' plm Package}, Journal of Statistical Software, 27(2). #' \url{http://www.jstatsoft.org/v27/i02/} #' #' Gaure, S. 
(2013) \cite{OLS with Multiple High Dimensional Category #' Variables.} Computational Statistics and Data Analysis, 66:8--18, 2013 #' \url{http://dx.doi.org/10.1016/j.csda.2013.03.024} #' #' Gaure, S. (2014a) \cite{lfe: Linear Group Fixed Effects.} The R Journal, #' 5(2):104-117, Dec 2013. #' \url{http://journal.r-project.org/archive/2013-2/gaure.pdf} #' #' Gaure, S. (2014b), \cite{Correlation bias correction in two-way #' fixed-effects linear regression}, Stat 3(1):379-390, 2014. #' \url{http://dx.doi.org/10.1002/sta4.68} #' #' Guimaraes, P. and Portugal, P. (2010) \cite{A simple feasible procedure to #' fit models with high-dimensional fixed effects.} The Stata Journal, #' 10(4):629--649, 2010. #' \url{http://www.stata-journal.com/article.html?article=st0212} #' #' Ouazad, A. (2008) \cite{A2REG: Stata module to estimate models with two #' fixed effects.} Statistical Software Components S456942, Boston College #' Department of Economics. #' \url{http://ideas.repec.org/c/boc/bocode/s456942.html} #' #' Sanderson, E. and F. Windmeijer (2014) \cite{A weak instrument F-test in #' linear IV models with multiple endogenous variables}, Journal of #' Econometrics, 2015. 
#' \url{http://www.sciencedirect.com/science/article/pii/S0304407615001736} #' @keywords regression models #' @examples #' #' oldopts <- options(lfe.threads=1) #' x <- rnorm(1000) #' x2 <- rnorm(length(x)) #' id <- factor(sample(10,length(x),replace=TRUE)) #' firm <- factor(sample(3,length(x),replace=TRUE,prob=c(2,1.5,1))) #' year <- factor(sample(10,length(x),replace=TRUE,prob=c(2,1.5,rep(1,8)))) #' id.eff <- rnorm(nlevels(id)) #' firm.eff <- rnorm(nlevels(firm)) #' year.eff <- rnorm(nlevels(year)) #' y <- x + 0.25*x2 + id.eff[id] + firm.eff[firm] + #' year.eff[year] + rnorm(length(x)) #' est <- felm(y ~ x+x2 | id + firm + year) #' summary(est) #' #' getfe(est,se=TRUE) #' # compare with an ordinary lm #' summary(lm(y ~ x+x2+id+firm+year-1)) #' options(oldopts) #' #' @useDynLib lfe, .registration=TRUE, .fixes='C_' #' @importFrom methods as #' @importFrom xtable xtable #' @importFrom sandwich estfun #' @import 'stats' #' @import Formula #' @importFrom Matrix t Diagonal rankMatrix Cholesky nnzero crossprod tcrossprod diag #' @importClassesFrom Matrix sparseMatrix NULL
/R/lfe-package.R
no_license
kendonB/lfe
R
false
false
10,128
r
#' Overview. Linear Group Fixed Effects #' #' The package uses the Method of Alternating Projections to estimate linear #' models with multiple group fixed effects. A generalization of the within #' estimator. It supports IV-estimation with multiple endogenous variables via #' 2SLS, with conditional F statistics for detection of weak instruments. It is #' thread-parallelized and intended for large problems. A method for correcting #' limited mobility bias is also included. #' #' #' This package is intended for linear models with multiple group fixed #' effects, i.e. with 2 or more factors with a large number of levels. It #' performs similar functions as \code{\link[stats]{lm}}, but it uses a special #' method for projecting out multiple group fixed effects from the normal #' equations, hence it is faster. It is a generalization of the within #' estimator. This may be required if the groups have high cardinality (many #' levels), resulting in tens or hundreds of thousands of dummy variables. It #' is also useful if one only wants to control for the group effects, without #' actually estimating them. The package may optionally compute standard #' errors for the group effects by bootstrapping, but this is a very time- and #' memory-consuming process compared to finding the point estimates. If you #' only have a single huge factor, the package \pkg{plm} is probably better #' suited. If your factors don't have thousands of levels, #' \code{\link[stats]{lm}} or other packages are probably better suited. #' \pkg{lfe} is designed to produce the same results as \code{\link[stats]{lm}} #' will do if run with the full set of dummies. #' #' Projecting out interactions between continuous covariates and factors is #' supported. I.e. individual slopes, not only individual intercepts. Multiple #' left hand sides are supported. #' #' The estimation is done in two steps. 
First the other coefficients are #' estimated with the function \code{\link{felm}} by centering on all the group #' means, followed by an OLS (similar to lm). Then the group effects are #' extracted (if needed) with the function \code{\link{getfe}}. This method is #' described by \cite{Gaure (2013)}, but also appears in \cite{Guimaraes and #' Portugal (2010)}, disguised as the Gauss-Seidel algorithm. #' #' There's also a function \code{\link{demeanlist}} which just does the #' centering on an arbitrary matrix or data frame, and there's a function #' \code{\link{compfactor}} which computes the connected components which are #' used for interpreting the group effects when there are only two factors (see #' the Abowd et al references), they are also returned by \code{\link{getfe}}. #' #' For those who study the correlation between the fixed effects, like in #' \cite{Abowd et al. (1999)}, there are functions \code{\link{bccorr}} and #' \code{\link{fevcov}} for computing limited mobility bias corrected #' correlations and variances with the method described in \cite{Gaure #' (2014b)}. #' #' Instrumental variable estimations are supported with 2SLS. Conditional F #' statistics for testing reduced rank weak instruments as in \cite{Sanderson #' and Windmeijer (2015)} are available in \code{\link{condfstat}}. Joint #' significance testing of coefficients is available in \code{\link{waldtest}}. #' #' The centering on the means is done with a tolerance which is set by #' \code{options(lfe.eps=1e-8)} (the default). This is a somewhat conservative #' tolerance, in many cases I'd guess \code{1e-6} may be sufficient. This may #' speed up the centering. In the other direction, setting #' \code{options(lfe.eps=0)} will provide maximum accuracy at the cost of #' computing time and warnings about convergence failure. #' #' The package is threaded, that is, it may use more than one cpu. 
The number #' of threads is fetched upon loading the package from the environment variable #' \env{LFE_THREADS}, \env{OMP_THREAD_LIMIT}, \env{OMP_NUM_THREADS} or #' \env{NUMBER_OF_PROCESSORS} (for Windows), and stored by #' \code{options(lfe.threads=n)}. This option can be changed prior to calling #' \code{\link{felm}}, if so desired. Note that, typically, \pkg{lfe} is #' limited by memory bandwidth, not cpu speed, thus fast memory and large cache #' is more important than clock frequency. It is therefore also not always true #' that running on all available cores is much better than running on half of #' them. #' #' Threading is only done for the centering; the extraction of the group #' effects is not threaded. The default method for extracting the group #' coefficients is the iterative Kaczmarz-method, its tolerance is also the #' \code{lfe.eps} option. For some datasets the Kaczmarz-method is converging #' very slowly, in this case it may be replaced with a conjugate gradient #' method by setting the option \code{options(lfe.usecg=TRUE)}. Various #' time-consuming parts of \pkg{lfe} may print progress reports, the minimum #' interval in seconds is \code{options(lfe.pint=1800)}. #' #' The package has been tested on datasets with approx 20,000,000 observations #' with 15 covariates and approx 2,300,000 and 270,000 group levels (the #' \code{\link{felm}} took about 50 minutes on 8 cpus, the \code{\link{getfe}} #' takes 5 minutes). Though, beware that not only the size of the dataset #' matters, but also its structure, as demonstrated by \cite{Gaure (2014a)}. #' #' The package will work with any number of grouping factors, but if more than #' two, their interpretation is in general not well understood, i.e. one should #' make sure that the group coefficients are estimable. A discussion of #' estimability, the algorithm used, and convergence rate are available in #' vignettes, as well as in the published papers in the citation list #' (\code{citation('lfe')}). 
#' #' In the exec-directory there is a perl-script \code{lfescript} which is used #' at the author's site for automated creation of R-scripts from a simple #' specification file. The format is documented in \code{doc/lfeguide.txt}. #' #' \pkg{lfe} is similar in function, though not in method, to the Stata modules #' \code{a2reg} and \code{felsdvreg}. The method is very similar to the one in #' the Stata module \code{reghdfe}. #' #' @name lfe-package #' @aliases lfe-package lfe #' @docType package #' @references Abowd, J.M., F. Kramarz and D.N. Margolis (1999) \cite{High Wage #' Workers and High Wage Firms}, Econometrica 67 (1999), no. 2, 251--333. #' \url{http://dx.doi.org/10.1111/1468-0262.00020} #' #' Abowd, J.M., R. Creecy and F. Kramarz (2002) \cite{Computing Person and Firm #' Effects Using Linked Longitudinal Employer-Employee Data.} Technical Report #' TP-2002-06, U.S. Census Bureau. #' \url{http://lehd.did.census.gov/led/library/techpapers/tp-2002-06.pdf} #' #' Andrews, M., L. Gill, T. Schank and R. Upward (2008) \cite{High wage workers #' and low wage firms: negative assortative matching or limited mobility bias?} #' J.R. Stat. Soc.(A) 171(3), 673--697. #' \url{http://dx.doi.org/10.1111/j.1467-985X.2007.00533.x} #' #' Cornelissen, T. (2008) \cite{The stata command felsdvreg to fit a linear #' model with two high-dimensional fixed effects.} Stata Journal, #' 8(2):170--189, 2008. #' \url{http://econpapers.repec.org/RePEc:tsj:stataj:v:8:y:2008:i:2:p:170-189} #' #' Correia, S. (2014) \cite{REGHDFE: Stata module to perform linear or #' instrumental-variable regression absorbing any number of high-dimensional #' fixed effects}, Statistical Software Components, Boston College Department #' of Economics. \url{http://econpapers.repec.org/RePEc:boc:bocode:s457874} #' #' Croissant, Y. and G. Millo (2008) \cite{Panel Data Econometrics in R: The #' plm Package}, Journal of Statistical Software, 27(2). #' \url{http://www.jstatsoft.org/v27/i02/} #' #' Gaure, S. 
(2013) \cite{OLS with Multiple High Dimensional Category #' Variables.} Computational Statistics and Data Analysis, 66:8--18, 2013 #' \url{http://dx.doi.org/10.1016/j.csda.2013.03.024} #' #' Gaure, S. (2014a) \cite{lfe: Linear Group Fixed Effects.} The R Journal, #' 5(2):104-117, Dec 2013. #' \url{http://journal.r-project.org/archive/2013-2/gaure.pdf} #' #' Gaure, S. (2014b), \cite{Correlation bias correction in two-way #' fixed-effects linear regression}, Stat 3(1):379-390, 2014. #' \url{http://dx.doi.org/10.1002/sta4.68} #' #' Guimaraes, P. and Portugal, P. (2010) \cite{A simple feasible procedure to #' fit models with high-dimensional fixed effects.} The Stata Journal, #' 10(4):629--649, 2010. #' \url{http://www.stata-journal.com/article.html?article=st0212} #' #' Ouazad, A. (2008) \cite{A2REG: Stata module to estimate models with two #' fixed effects.} Statistical Software Components S456942, Boston College #' Department of Economics. #' \url{http://ideas.repec.org/c/boc/bocode/s456942.html} #' #' Sanderson, E. and F. Windmeijer (2014) \cite{A weak instrument F-test in #' linear IV models with multiple endogenous variables}, Journal of #' Econometrics, 2015. 
#' \url{http://www.sciencedirect.com/science/article/pii/S0304407615001736} #' @keywords regression models #' @examples #' #' oldopts <- options(lfe.threads=1) #' x <- rnorm(1000) #' x2 <- rnorm(length(x)) #' id <- factor(sample(10,length(x),replace=TRUE)) #' firm <- factor(sample(3,length(x),replace=TRUE,prob=c(2,1.5,1))) #' year <- factor(sample(10,length(x),replace=TRUE,prob=c(2,1.5,rep(1,8)))) #' id.eff <- rnorm(nlevels(id)) #' firm.eff <- rnorm(nlevels(firm)) #' year.eff <- rnorm(nlevels(year)) #' y <- x + 0.25*x2 + id.eff[id] + firm.eff[firm] + #' year.eff[year] + rnorm(length(x)) #' est <- felm(y ~ x+x2 | id + firm + year) #' summary(est) #' #' getfe(est,se=TRUE) #' # compare with an ordinary lm #' summary(lm(y ~ x+x2+id+firm+year-1)) #' options(oldopts) #' #' @useDynLib lfe, .registration=TRUE, .fixes='C_' #' @importFrom methods as #' @importFrom xtable xtable #' @importFrom sandwich estfun #' @import 'stats' #' @import Formula #' @importFrom Matrix t Diagonal rankMatrix Cholesky nnzero crossprod tcrossprod diag #' @importClassesFrom Matrix sparseMatrix NULL
#!/usr/bin/env Rscript
# bosh liu
# durga
# Subset the top n genes (highest total expression) according to RPKM sum.

# fread() comes from data.table; load it explicitly so the script runs
# outside an environment that pre-loads it.
library(data.table)

# command args: <input_file> <output_file> <n>
args <- commandArgs(TRUE)
input_file <- args[1]
output_file <- args[2]
# commandArgs() returns character; without this conversion the later
# `rpkm_rank <= n` comparison would be lexicographic ("9" > "100"), not numeric.
n <- as.numeric(args[3])

# read input (first column is assumed to be the gene identifier):
input <- fread(input_file, header = TRUE)

# calculate sum of expression per gene:
rpkm_sum <- rowSums(input[, -1, with = FALSE])
# Rank the negated sums so rank 1 is the HIGHEST total expression;
# the original ranked ascending, which kept the bottom-n genes instead.
rpkm_rank <- rank(-rpkm_sum)

# subset for top n genes:
keep <- (rpkm_rank <= n)
output <- input[keep, ]

# write output:
write.table(output, output_file, quote = FALSE, sep = "\t",
            col.names = TRUE, row.names = FALSE)
/eqtl/peer/subset_top_genes.R
no_license
chanibravo/hcasmc_eqtl
R
false
false
477
r
#!/usr/bin/env Rscript
# bosh liu
# durga
# Subset the top n genes (highest total expression) according to RPKM sum.

# fread() comes from data.table; load it explicitly so the script runs
# outside an environment that pre-loads it.
library(data.table)

# command args: <input_file> <output_file> <n>
args <- commandArgs(TRUE)
input_file <- args[1]
output_file <- args[2]
# commandArgs() returns character; without this conversion the later
# `rpkm_rank <= n` comparison would be lexicographic ("9" > "100"), not numeric.
n <- as.numeric(args[3])

# read input (first column is assumed to be the gene identifier):
input <- fread(input_file, header = TRUE)

# calculate sum of expression per gene:
rpkm_sum <- rowSums(input[, -1, with = FALSE])
# Rank the negated sums so rank 1 is the HIGHEST total expression;
# the original ranked ascending, which kept the bottom-n genes instead.
rpkm_rank <- rank(-rpkm_sum)

# subset for top n genes:
keep <- (rpkm_rank <= n)
output <- input[keep, ]

# write output:
write.table(output, output_file, quote = FALSE, sep = "\t",
            col.names = TRUE, row.names = FALSE)
library(parsetools)

### Name: pd_class_definitions
### Title: Test for Class Definitions
### Aliases: pd_class_definitions pd_is_class_definition
###   pd_is_in_class_definition pd_add_class_definition pd_add_class
### Keywords: datasets

### ** Examples

# Parse the bundled example file and extract its parse data.
example_path <- system.file("examples", "example.R", package = "parsetools")
parsed_exprs <- parse(example_path, keep.source = TRUE)
pd <- get_parse_data(parsed_exprs)

# The file holds three expressions, so three roots are expected.
sum(pd_is_root(pd$id, pd))
root_ids <- pd_all_root_ids(pd)

# Grab the value assigned by the second root: the 'setClass' call.
class_id <- pd_get_assign_value_id(root_ids[2], pd)

# Confirm it is a function call that defines a class ...
pd_is_class_definition(class_id, pd)
# ... and that the call symbol really is 'setClass'.
pd_text(pd_get_call_symbol_id(class_id, pd), pd)
/data/genthat_extracted_code/parsetools/examples/pd_class_definitions.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
819
r
library(parsetools)

### Name: pd_class_definitions
### Title: Test for Class Definitions
### Aliases: pd_class_definitions pd_is_class_definition
###   pd_is_in_class_definition pd_add_class_definition pd_add_class
### Keywords: datasets

### ** Examples

# Parse the bundled example file and extract its parse data.
example_path <- system.file("examples", "example.R", package = "parsetools")
parsed_exprs <- parse(example_path, keep.source = TRUE)
pd <- get_parse_data(parsed_exprs)

# The file holds three expressions, so three roots are expected.
sum(pd_is_root(pd$id, pd))
root_ids <- pd_all_root_ids(pd)

# Grab the value assigned by the second root: the 'setClass' call.
class_id <- pd_get_assign_value_id(root_ids[2], pd)

# Confirm it is a function call that defines a class ...
pd_is_class_definition(class_id, pd)
# ... and that the call symbol really is 'setClass'.
pd_text(pd_get_call_symbol_id(class_id, pd), pd)
# Accessions of interest; get_mt_ids() and DIR_Repo come from the project environment.
accs <- get_mt_ids("acc26")

# ---- Filter Pindel output ----
dir <- file.path(DIR_Repo, "mt_35/40_sv/31_pindel")
f1 <- file.path(dir, "11.tbl")
p1 <- read.table(f1, sep = "\t", header = TRUE, as.is = TRUE)
# Keep deletions >= 30 bp supported by >= 2 individuals and >= 5 unique reads.
p2 <- p1[p1$size_d >= 30 & p1$n_ind >= 2 & p1$n_reads_uniq >= 5, ]
write.table(p2, file.path(dir, "12_filtered.tbl"), sep = "\t", quote = FALSE,
            row.names = FALSE, col.names = TRUE)

# ---- Process and filter CREST outputs ----
dir <- file.path(DIR_Repo, "mt_35/40_sv/33_crest")
fi <- file.path(dir, "11_sum.tbl")
t01 <- read.table(fi, header = TRUE, sep = "\t", as.is = TRUE)
t01 <- t01[t01$acc %in% accs, ]
# Collapse identical breakpoints across accessions; strconcat() (project helper)
# joins the accession IDs into one string per unique breakpoint.
t02 <- aggregate(t01$acc,
                 by = list(factor(t01$chr_l), factor(t01$pos_l), factor(t01$strand_l),
                           factor(t01$chr_r), factor(t01$pos_r), factor(t01$strand_r),
                           factor(t01$type)),
                 FUN = strconcat)
colnames(t02) <- c("chr_l", "pos_l", "strand_l", "chr_r", "pos_r", "strand_r",
                   "type", "acc")
# aggregate() turned the positions into factors; recover numeric values.
t02$pos_l <- as.numeric(as.character(t02$pos_l))
t02$pos_r <- as.numeric(as.character(t02$pos_r))
t03 <- cbind(id = seq_len(nrow(t02)), t02,
             len = t02$pos_r - t02$pos_l - 1,
             n_acc = as.numeric(lapply(strsplit(t02$acc, " "), FUN = length)))
write.table(t03, file.path(dir, "21.tbl"), sep = "\t", quote = FALSE,
            row.names = FALSE, col.names = TRUE)
# Keep recurrent (>= 2 accessions) deletions shorter than 5 kb.
t04 <- t03[t03$n_acc >= 2 & t03$type == 'DEL' & t03$len < 5000, ]
write.table(t04, file.path(dir, "22_filtered.tbl"), sep = "\t", quote = FALSE,
            row.names = FALSE, col.names = TRUE)

# ---- Overlap between Pindel & CREST calls ----
dir <- file.path(DIR_Repo, "mt_35/40_sv/41_shared")
fp <- file.path(dir, "../31_pindel/12_filtered.tbl")
p1 <- read.table(fp, sep = "\t", header = TRUE, as.is = TRUE)
fc <- file.path(dir, "../33_crest/22_filtered.tbl")
c1 <- read.table(fc, sep = "\t", header = TRUE, as.is = TRUE)
c2 <- cbind(c1, id_pindel = NA)
# For each CREST call, find a Pindel call whose breakpoints match within 10 bp;
# if several match, take the one supported by the most individuals.
# seq_len() (not 1:nrow) stays correct when c2 has zero rows.
for (i in seq_len(nrow(c2))) {
  p2 <- p1[abs(p1$beg - c2$pos_l[i]) <= 10 & abs(p1$end - c2$pos_r[i]) <= 10, ]
  if (nrow(p2) >= 1) {
    c2$id_pindel[i] <- p2$id[which.max(p2$n_ind)]
  }
}
colnames(c2)[1] <- "id_crest"
colnames(p1)[1] <- "id_pindel"
c3 <- merge(p1, c2, by = "id_pindel")
# Keep, per Pindel ID, the CREST call seen in the most accessions.
# vapply() replaces the original grow-by-c() loop (O(n^2) appends) and
# guarantees a character result.
idxs <- vapply(unique(c3$id_pindel), function(id_pindel) {
  c4 <- c3[c3$id_pindel == id_pindel, ]
  row.names(c4)[which.max(c4$n_acc)]
}, character(1))
c5 <- c3[idxs, ]
write.table(c5, file.path(dir, "11.tbl"), sep = "\t", quote = FALSE,
            row.names = FALSE, col.names = TRUE)
write.table(c5[, 1:4], file.path(dir, "12_simple.tbl"), sep = "\t", quote = FALSE,
            row.names = FALSE, col.names = TRUE)
/r/crest.R
no_license
rakeshponnala/luffy
R
false
false
2,269
r
# Accessions of interest; get_mt_ids() and DIR_Repo come from the project environment.
accs <- get_mt_ids("acc26")

# ---- Filter Pindel output ----
dir <- file.path(DIR_Repo, "mt_35/40_sv/31_pindel")
f1 <- file.path(dir, "11.tbl")
p1 <- read.table(f1, sep = "\t", header = TRUE, as.is = TRUE)
# Keep deletions >= 30 bp supported by >= 2 individuals and >= 5 unique reads.
p2 <- p1[p1$size_d >= 30 & p1$n_ind >= 2 & p1$n_reads_uniq >= 5, ]
write.table(p2, file.path(dir, "12_filtered.tbl"), sep = "\t", quote = FALSE,
            row.names = FALSE, col.names = TRUE)

# ---- Process and filter CREST outputs ----
dir <- file.path(DIR_Repo, "mt_35/40_sv/33_crest")
fi <- file.path(dir, "11_sum.tbl")
t01 <- read.table(fi, header = TRUE, sep = "\t", as.is = TRUE)
t01 <- t01[t01$acc %in% accs, ]
# Collapse identical breakpoints across accessions; strconcat() (project helper)
# joins the accession IDs into one string per unique breakpoint.
t02 <- aggregate(t01$acc,
                 by = list(factor(t01$chr_l), factor(t01$pos_l), factor(t01$strand_l),
                           factor(t01$chr_r), factor(t01$pos_r), factor(t01$strand_r),
                           factor(t01$type)),
                 FUN = strconcat)
colnames(t02) <- c("chr_l", "pos_l", "strand_l", "chr_r", "pos_r", "strand_r",
                   "type", "acc")
# aggregate() turned the positions into factors; recover numeric values.
t02$pos_l <- as.numeric(as.character(t02$pos_l))
t02$pos_r <- as.numeric(as.character(t02$pos_r))
t03 <- cbind(id = seq_len(nrow(t02)), t02,
             len = t02$pos_r - t02$pos_l - 1,
             n_acc = as.numeric(lapply(strsplit(t02$acc, " "), FUN = length)))
write.table(t03, file.path(dir, "21.tbl"), sep = "\t", quote = FALSE,
            row.names = FALSE, col.names = TRUE)
# Keep recurrent (>= 2 accessions) deletions shorter than 5 kb.
t04 <- t03[t03$n_acc >= 2 & t03$type == 'DEL' & t03$len < 5000, ]
write.table(t04, file.path(dir, "22_filtered.tbl"), sep = "\t", quote = FALSE,
            row.names = FALSE, col.names = TRUE)

# ---- Overlap between Pindel & CREST calls ----
dir <- file.path(DIR_Repo, "mt_35/40_sv/41_shared")
fp <- file.path(dir, "../31_pindel/12_filtered.tbl")
p1 <- read.table(fp, sep = "\t", header = TRUE, as.is = TRUE)
fc <- file.path(dir, "../33_crest/22_filtered.tbl")
c1 <- read.table(fc, sep = "\t", header = TRUE, as.is = TRUE)
c2 <- cbind(c1, id_pindel = NA)
# For each CREST call, find a Pindel call whose breakpoints match within 10 bp;
# if several match, take the one supported by the most individuals.
# seq_len() (not 1:nrow) stays correct when c2 has zero rows.
for (i in seq_len(nrow(c2))) {
  p2 <- p1[abs(p1$beg - c2$pos_l[i]) <= 10 & abs(p1$end - c2$pos_r[i]) <= 10, ]
  if (nrow(p2) >= 1) {
    c2$id_pindel[i] <- p2$id[which.max(p2$n_ind)]
  }
}
colnames(c2)[1] <- "id_crest"
colnames(p1)[1] <- "id_pindel"
c3 <- merge(p1, c2, by = "id_pindel")
# Keep, per Pindel ID, the CREST call seen in the most accessions.
# vapply() replaces the original grow-by-c() loop (O(n^2) appends) and
# guarantees a character result.
idxs <- vapply(unique(c3$id_pindel), function(id_pindel) {
  c4 <- c3[c3$id_pindel == id_pindel, ]
  row.names(c4)[which.max(c4$n_acc)]
}, character(1))
c5 <- c3[idxs, ]
write.table(c5, file.path(dir, "11.tbl"), sep = "\t", quote = FALSE,
            row.names = FALSE, col.names = TRUE)
write.table(c5[, 1:4], file.path(dir, "12_simple.tbl"), sep = "\t", quote = FALSE,
            row.names = FALSE, col.names = TRUE)
# Import features.txt to determine which columns to use in the data set.
features <- read.table("C:/Users/raoul/OneDrive/Documents/DataScience-Coursera/week1-clenning-data/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/features.txt", sep = "", header = FALSE, dec = ".")

# Only retain features that contain mean() or std() values (66 of the 561 features).
used_features <- grep("mean+\\()|std+\\()", as.character(features[, 2]), value = TRUE)

## -------------------------------------------------------------------------
# Import the training set.
X_train <- read.table("C:/Users/raoul/OneDrive/Documents/DataScience-Coursera/week1-clenning-data/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/train/X_train.txt", sep = "", header = FALSE, dec = ".")
colnames(X_train) <- features[, 2]
y_train <- read.table("C:/Users/raoul/OneDrive/Documents/DataScience-Coursera/week1-clenning-data/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/train/y_train.txt", sep = "", header = FALSE, dec = ".")
colnames(y_train) <- "Label"

# Import the test set.
X_test <- read.table("C:/Users/raoul/OneDrive/Documents/DataScience-Coursera/week1-clenning-data/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/test/X_test.txt", sep = "", header = FALSE, dec = ".")
colnames(X_test) <- features[, 2]
y_test <- read.table("C:/Users/raoul/OneDrive/Documents/DataScience-Coursera/week1-clenning-data/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/test/y_test.txt", sep = "", header = FALSE, dec = ".")
colnames(y_test) <- "Label"

# The new training and test set containing only the required columns.
new_X_train <- X_train[, used_features]
new_X_test <- X_test[, used_features]

# Create the combined data set.
smartphone_features <- rbind(new_X_train, new_X_test)
smartphone_label <- rbind(y_train, y_test)

# Replace activity codes 1..6 with descriptive names.
# BUG FIX: the original allocated rep(0, length(smartphone_label)) —
# length() of a data frame is its COLUMN count (1), not its row count —
# and relied on out-of-bounds logical assignment to grow the vector.
# Indexing a lookup vector by the numeric code yields the same factor
# (as.factor sorts levels alphabetically, exactly as before).
activity_names <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
                    "SITTING", "STANDING", "LAYING")
descriptive_smartphone_label <- as.factor(activity_names[smartphone_label$Label])
descriptive_smartphone_label <- data.frame(descriptive_smartphone_label)
colnames(descriptive_smartphone_label) <- "Activity"

# Here is our desired data set.
smartphone_dataset <- cbind(smartphone_features, descriptive_smartphone_label)

## -------------------------------------------------------------------------
# Row-bind the subject training and test sets into one data frame.
subject_train <- read.table("C:/Users/raoul/OneDrive/Documents/DataScience-Coursera/week1-clenning-data/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/train/subject_train.txt", sep = "", header = FALSE, dec = ".")
subject_test <- read.table("C:/Users/raoul/OneDrive/Documents/DataScience-Coursera/week1-clenning-data/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/test/subject_test.txt", sep = "", header = FALSE, dec = ".")
subject <- rbind(subject_train, subject_test)
colnames(subject) <- "subject"

# The new smartphone data set with the subject column prepended.
new_smartphone_dataset <- cbind(subject, smartphone_dataset)

# Finally, the mean of all variables for each subject and each activity.
by_Subject_by_Activity <- aggregate(new_smartphone_dataset[, 2:67],
                                    list(new_smartphone_dataset$subject,
                                         new_smartphone_dataset$Activity),
                                    mean)
/run_analysis.R
no_license
shivaygulati/Getting-and-cleaning-data-from-the-accelerometers-in-the-Samsung-Galaxy-S-smartphone
R
false
false
4,455
r
# Import features.txt to determine which columns to use in the data set.
features <- read.table("C:/Users/raoul/OneDrive/Documents/DataScience-Coursera/week1-clenning-data/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/features.txt", sep = "", header = FALSE, dec = ".")

# Only retain features that contain mean() or std() values (66 of the 561 features).
used_features <- grep("mean+\\()|std+\\()", as.character(features[, 2]), value = TRUE)

## -------------------------------------------------------------------------
# Import the training set.
X_train <- read.table("C:/Users/raoul/OneDrive/Documents/DataScience-Coursera/week1-clenning-data/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/train/X_train.txt", sep = "", header = FALSE, dec = ".")
colnames(X_train) <- features[, 2]
y_train <- read.table("C:/Users/raoul/OneDrive/Documents/DataScience-Coursera/week1-clenning-data/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/train/y_train.txt", sep = "", header = FALSE, dec = ".")
colnames(y_train) <- "Label"

# Import the test set.
X_test <- read.table("C:/Users/raoul/OneDrive/Documents/DataScience-Coursera/week1-clenning-data/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/test/X_test.txt", sep = "", header = FALSE, dec = ".")
colnames(X_test) <- features[, 2]
y_test <- read.table("C:/Users/raoul/OneDrive/Documents/DataScience-Coursera/week1-clenning-data/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/test/y_test.txt", sep = "", header = FALSE, dec = ".")
colnames(y_test) <- "Label"

# The new training and test set containing only the required columns.
new_X_train <- X_train[, used_features]
new_X_test <- X_test[, used_features]

# Create the combined data set.
smartphone_features <- rbind(new_X_train, new_X_test)
smartphone_label <- rbind(y_train, y_test)

# Replace activity codes 1..6 with descriptive names.
# BUG FIX: the original allocated rep(0, length(smartphone_label)) —
# length() of a data frame is its COLUMN count (1), not its row count —
# and relied on out-of-bounds logical assignment to grow the vector.
# Indexing a lookup vector by the numeric code yields the same factor
# (as.factor sorts levels alphabetically, exactly as before).
activity_names <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
                    "SITTING", "STANDING", "LAYING")
descriptive_smartphone_label <- as.factor(activity_names[smartphone_label$Label])
descriptive_smartphone_label <- data.frame(descriptive_smartphone_label)
colnames(descriptive_smartphone_label) <- "Activity"

# Here is our desired data set.
smartphone_dataset <- cbind(smartphone_features, descriptive_smartphone_label)

## -------------------------------------------------------------------------
# Row-bind the subject training and test sets into one data frame.
subject_train <- read.table("C:/Users/raoul/OneDrive/Documents/DataScience-Coursera/week1-clenning-data/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/train/subject_train.txt", sep = "", header = FALSE, dec = ".")
subject_test <- read.table("C:/Users/raoul/OneDrive/Documents/DataScience-Coursera/week1-clenning-data/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/test/subject_test.txt", sep = "", header = FALSE, dec = ".")
subject <- rbind(subject_train, subject_test)
colnames(subject) <- "subject"

# The new smartphone data set with the subject column prepended.
new_smartphone_dataset <- cbind(subject, smartphone_dataset)

# Finally, the mean of all variables for each subject and each activity.
by_Subject_by_Activity <- aggregate(new_smartphone_dataset[, 2:67],
                                    list(new_smartphone_dataset$subject,
                                         new_smartphone_dataset$Activity),
                                    mean)
source('../WERM_Heuristic.R')

#' Estimate \hat{W*} = P(outVar) / P(outVar | inVar) with xgboost classifiers.
#' This ratio is dependent on the graph (napkin example).
#'
#' @param outVar.train,inVar.train training outcome (0/1) and covariates.
#' @param outVar.eval,inVar.eval   evaluation outcome (0/1) and covariates.
#' @param outputname               unused; kept for interface compatibility.
#' @return An n x 1 matrix of weights for the evaluation rows.
myComputeSW.lowhigh <- function(outVar.train, inVar.train, outVar.eval, inVar.eval, outputname) {
  # Intercept-only design (a column of ones) for the marginal model P(outVar = 1).
  inVar_margin <- data.matrix(rep(1, length(outVar.train)))
  model_xgboost <- xgboost(verbose = 0, data = data.matrix(inVar.train),
                           label = outVar.train, nrounds = 20, max.depth = 10,
                           lambda = 1 / length(outVar.train),
                           alpha = 1 / length(outVar.train),
                           objective = "binary:logistic")
  # NOTE(review): the margin model is fit with `outVar.eval` labels against a
  # design sized by `outVar.train`; this only lines up when both halves have
  # equal length (as SplitDataset produces) — confirm this is intended.
  model_xgboost_margin <- xgboost(verbose = 0, data = inVar_margin,
                                  label = outVar.eval, nrounds = 20, max.depth = 10,
                                  lambda = 1 / length(outVar.train),
                                  alpha = 1 / length(outVar.train),
                                  objective = "binary:logistic")
  Prob.outVar.1 <- predict(model_xgboost_margin, newdata = inVar_margin, type = 'response')
  Prob.outVar.0 <- 1 - Prob.outVar.1
  # Elementwise products replace the original diag(v) %*% M constructions,
  # which built full n x n matrices (O(n^2) time/memory) for a row scaling
  # that is mathematically identical to v * M.
  Prob.outVar <- as.matrix(Prob.outVar.1 * outVar.eval +
                             Prob.outVar.0 * (1 - outVar.eval))
  Prob.outVar.1.giveninVar <- predict(model_xgboost, newdata = data.matrix(inVar.eval), type = "response")
  Prob.outVar.0.giveninVar <- 1 - Prob.outVar.1.giveninVar
  Prob.outVar.inVar <- as.matrix(Prob.outVar.1.giveninVar * outVar.eval +
                                   Prob.outVar.0.giveninVar * (1 - outVar.eval))
  SWXX <- Prob.outVar / Prob.outVar.inVar
  return(SWXX)
}

#' Randomly split DATA into two equal halves (train / eval), row names reset.
#' @param DATA a data frame.
#' @return list(train, eval).
SplitDataset <- function(DATA) {
  all_idx <- seq_len(nrow(DATA))
  # NOTE(review): nrow(DATA)/2 is fractional for odd row counts; sample()
  # appears to truncate the size, leaving the extra row in the eval half —
  # TODO confirm that is acceptable.
  train_idx <- sample(all_idx, nrow(DATA) / 2)
  eval_idx <- setdiff(all_idx, train_idx)
  DATA.train <- DATA[train_idx, ]
  rownames(DATA.train) <- seq_len(nrow(DATA.train))
  DATA.eval <- DATA[eval_idx, ]
  rownames(DATA.eval) <- seq_len(nrow(DATA.eval))
  return(list(DATA.train, DATA.eval))
}

#' Backdoor-style estimator: averages the fitted E[Y | x, z, w] over (W, Z).
#' OBS columns: W (first D columns), then Z, X, Y.
#' @return c(estimate for X = 0, estimate for X = 1).
asBDEstimator <- function(OBS, D) {
  W <- OBS[, 1:D]
  Z <- OBS[, (D + 1)]
  X <- OBS[, (D + 2)]
  Y <- OBS[, (D + 3)]
  DATA <- data.frame(W, Z, X, Y)
  ################################################################
  # Compute E[Y | x, z, w]; learnXG() comes from WERM_Heuristic.R.
  ################################################################
  model.Y <- learnXG(inVar = data.matrix(data.frame(W = W, Z = Z, X = X)),
                     labelval = Y, regval = rep(0, nrow(DATA)))
  # Counterfactual designs with X forced to 0 and to 1.
  DATA.X0 <- data.frame(W = W, Z = Z, X = rep(0, nrow(DATA)))
  DATA.X1 <- data.frame(W = W, Z = Z, X = rep(1, nrow(DATA)))
  ################################################################
  # Predict under both interventions and average.
  ################################################################
  pred.X0 <- predict(model.Y, newdata = data.matrix(DATA.X0), type = 'response')
  pred.X1 <- predict(model.Y, newdata = data.matrix(DATA.X1), type = 'response')
  return(c(mean(pred.X0), mean(pred.X1)))
}
/demonstration/Example_1-napkin/napkin-as-BD.R
no_license
CMLennon/WERM
R
false
false
2,531
r
source('../WERM_Heuristic.R')

#' Estimate \hat{W*} = P(outVar) / P(outVar | inVar) with xgboost classifiers.
#' This ratio is dependent on the graph (napkin example).
#'
#' @param outVar.train,inVar.train training outcome (0/1) and covariates.
#' @param outVar.eval,inVar.eval   evaluation outcome (0/1) and covariates.
#' @param outputname               unused; kept for interface compatibility.
#' @return An n x 1 matrix of weights for the evaluation rows.
myComputeSW.lowhigh <- function(outVar.train, inVar.train, outVar.eval, inVar.eval, outputname) {
  # Intercept-only design (a column of ones) for the marginal model P(outVar = 1).
  inVar_margin <- data.matrix(rep(1, length(outVar.train)))
  model_xgboost <- xgboost(verbose = 0, data = data.matrix(inVar.train),
                           label = outVar.train, nrounds = 20, max.depth = 10,
                           lambda = 1 / length(outVar.train),
                           alpha = 1 / length(outVar.train),
                           objective = "binary:logistic")
  # NOTE(review): the margin model is fit with `outVar.eval` labels against a
  # design sized by `outVar.train`; this only lines up when both halves have
  # equal length (as SplitDataset produces) — confirm this is intended.
  model_xgboost_margin <- xgboost(verbose = 0, data = inVar_margin,
                                  label = outVar.eval, nrounds = 20, max.depth = 10,
                                  lambda = 1 / length(outVar.train),
                                  alpha = 1 / length(outVar.train),
                                  objective = "binary:logistic")
  Prob.outVar.1 <- predict(model_xgboost_margin, newdata = inVar_margin, type = 'response')
  Prob.outVar.0 <- 1 - Prob.outVar.1
  # Elementwise products replace the original diag(v) %*% M constructions,
  # which built full n x n matrices (O(n^2) time/memory) for a row scaling
  # that is mathematically identical to v * M.
  Prob.outVar <- as.matrix(Prob.outVar.1 * outVar.eval +
                             Prob.outVar.0 * (1 - outVar.eval))
  Prob.outVar.1.giveninVar <- predict(model_xgboost, newdata = data.matrix(inVar.eval), type = "response")
  Prob.outVar.0.giveninVar <- 1 - Prob.outVar.1.giveninVar
  Prob.outVar.inVar <- as.matrix(Prob.outVar.1.giveninVar * outVar.eval +
                                   Prob.outVar.0.giveninVar * (1 - outVar.eval))
  SWXX <- Prob.outVar / Prob.outVar.inVar
  return(SWXX)
}

#' Randomly split DATA into two equal halves (train / eval), row names reset.
#' @param DATA a data frame.
#' @return list(train, eval).
SplitDataset <- function(DATA) {
  all_idx <- seq_len(nrow(DATA))
  # NOTE(review): nrow(DATA)/2 is fractional for odd row counts; sample()
  # appears to truncate the size, leaving the extra row in the eval half —
  # TODO confirm that is acceptable.
  train_idx <- sample(all_idx, nrow(DATA) / 2)
  eval_idx <- setdiff(all_idx, train_idx)
  DATA.train <- DATA[train_idx, ]
  rownames(DATA.train) <- seq_len(nrow(DATA.train))
  DATA.eval <- DATA[eval_idx, ]
  rownames(DATA.eval) <- seq_len(nrow(DATA.eval))
  return(list(DATA.train, DATA.eval))
}

#' Backdoor-style estimator: averages the fitted E[Y | x, z, w] over (W, Z).
#' OBS columns: W (first D columns), then Z, X, Y.
#' @return c(estimate for X = 0, estimate for X = 1).
asBDEstimator <- function(OBS, D) {
  W <- OBS[, 1:D]
  Z <- OBS[, (D + 1)]
  X <- OBS[, (D + 2)]
  Y <- OBS[, (D + 3)]
  DATA <- data.frame(W, Z, X, Y)
  ################################################################
  # Compute E[Y | x, z, w]; learnXG() comes from WERM_Heuristic.R.
  ################################################################
  model.Y <- learnXG(inVar = data.matrix(data.frame(W = W, Z = Z, X = X)),
                     labelval = Y, regval = rep(0, nrow(DATA)))
  # Counterfactual designs with X forced to 0 and to 1.
  DATA.X0 <- data.frame(W = W, Z = Z, X = rep(0, nrow(DATA)))
  DATA.X1 <- data.frame(W = W, Z = Z, X = rep(1, nrow(DATA)))
  ################################################################
  # Predict under both interventions and average.
  ################################################################
  pred.X0 <- predict(model.Y, newdata = data.matrix(DATA.X0), type = 'response')
  pred.X1 <- predict(model.Y, newdata = data.matrix(DATA.X1), type = 'response')
  return(c(mean(pred.X0), mean(pred.X1)))
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{plot.mhtest} \alias{plot.mhtest} \title{Plot method} \usage{ \method{plot}{mhtest}(x, type = c("Manhattan", "Q-Q", "Volcano"), main = paste(type, "Plot"), coeff = 1, ...) } \arguments{ \item{x}{An object of class \code{mhtest}.} \item{type}{Either \itemize{ \item "Manhattan": plot of the negative logarithm (in base 10) of p-values (the default). \item "Q-Q": Q-Q plot. \item "Volcano": plot of the negative logarithm of p-values against the estimation of coefficients (e.g. betas in linear regression). }} \item{main}{The title of the plot. Default uses the \code{type}.} \item{coeff}{Relative size of text. Default is \code{1}.} \item{...}{Not used.} } \value{ A \code{ggplot2} object. You can plot it using the \code{print} method. You can modify it as you wish by adding layers. You might want to read \href{http://r4ds.had.co.nz/data-visualisation.html}{this chapter} to get more familiar with the package \strong{ggplot2}. } \description{ Plot method for class \code{mhtest}. } \examples{ set.seed(1) X <- big_attachExtdata() y <- rnorm(nrow(X)) test <- big_univLinReg(X, y) plot(test) plot(test, type = "Volcano") plot(test, type = "Q-Q") } \seealso{ \link{big_univLinReg}, \link{big_univLogReg}, \link{plot.big_SVD} and \link{asPlotlyText}. }
/man/plot.mhtest.Rd
no_license
pamelinni/bigstatsr
R
false
true
1,351
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{plot.mhtest} \alias{plot.mhtest} \title{Plot method} \usage{ \method{plot}{mhtest}(x, type = c("Manhattan", "Q-Q", "Volcano"), main = paste(type, "Plot"), coeff = 1, ...) } \arguments{ \item{x}{An object of class \code{mhtest}.} \item{type}{Either \itemize{ \item "Manhattan": plot of the negative logarithm (in base 10) of p-values (the default). \item "Q-Q": Q-Q plot. \item "Volcano": plot of the negative logarithm of p-values against the estimation of coefficients (e.g. betas in linear regression). }} \item{main}{The title of the plot. Defaults to the \code{type}.} \item{coeff}{Relative size of text. Default is \code{1}.} \item{...}{Not used.} } \value{ A \code{ggplot2} object. You can plot it using the \code{print} method. You can modify it as you wish by adding layers. You might want to read \href{http://r4ds.had.co.nz/data-visualisation.html}{this chapter} to get more familiar with the package \strong{ggplot2}. } \description{ Plot method for class \code{mhtest}. } \examples{ set.seed(1) X <- big_attachExtdata() y <- rnorm(nrow(X)) test <- big_univLinReg(X, y) plot(test) plot(test, type = "Volcano") plot(test, type = "Q-Q") } \seealso{ \link{big_univLinReg}, \link{big_univLogReg}, \link{plot.big_SVD} and \link{asPlotlyText}. }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/helpfunctions_JAGS.R \name{sum_duration} \alias{sum_duration} \title{Calculate the sum of the computational duration of a JointAI object} \usage{ sum_duration(object, by = NULL) } \arguments{ \item{object}{object of class \code{JointAI}} \item{by}{optional grouping information; options are \code{NULL} (default) to calculate the sum over all chains and runs and both the adaptive and sampling phase, \code{"run"} to get the duration per run, \code{"phase"} to get the sum over all chains and runs per phase, \code{"chain"} to get the sum per chain over both phases and all runs, \code{"phase and run"} to get the sum over all chains, separately per phase and run.} } \description{ Calculate the sum of the computational duration of a JointAI object }
/man/sum_duration.Rd
no_license
NErler/JointAI
R
false
true
831
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/helpfunctions_JAGS.R \name{sum_duration} \alias{sum_duration} \title{Calculate the sum of the computational duration of a JointAI object} \usage{ sum_duration(object, by = NULL) } \arguments{ \item{object}{object of class \code{JointAI}} \item{by}{optional grouping information; options are \code{NULL} (default) to calculate the sum over all chains and runs and both the adaptive and sampling phase, \code{"run"} to get the duration per run, \code{"phase"} to get the sum over all chains and runs per phase, \code{"chain"} to get the sum per chain over both phases and all runs, \code{"phase and run"} to get the sum over all chains, separately per phase and run.} } \description{ Calculate the sum of the computational duration of a JointAI object }
# Climate-Comparisons.R
# Compares 2018/2019 and 2019/2020 water years against long-term means for
# (1) Prince George Airport (temperature/precipitation vs climate normals) and
# (2) Dome Mountain station (SWE, air temperature, precipitation).
# Reads CSV exports from disk; writes DomePrecip.png and DomeGrid.png.

library(tidyverse)
library(lubridate)
library(gridExtra)
library(plotly)
library(roll)
library(magick)
library(kableExtra)

# Prince George Airport Plot ----------------------------------------------
# Comparing 2019, 2020 with 1989-2010 climate normals

pgdat <- paste0("Climate-Comparisons/pg_daily_data/", dir(path = "Climate-Comparisons/pg_daily_data/"))

# Read every daily-data CSV and stack them via repeated full joins.
pgdat <- pgdat %>%
  map(read_csv) %>%
  reduce(full_join)

pgthin <- pgdat %>%
  dplyr::rename(date = `Date/Time`, temp = `Mean Temp (°C)`, precip = `Total Precip (mm)`) %>%
  dplyr::select(date,temp,precip)

# Monthly mean temperature and total precipitation per year.
pgmonthly <- pgthin %>%
  mutate(year = as.factor(year(date)), month = month(date,label = T, abbr = T)) %>%
  group_by(year,month) %>%
  summarise(temp = mean(temp,na.rm=T), precip = sum(precip, na.rm = T))

pgmonthly %>%
  ggplot()+
  geom_line(aes(month,precip,group = year, colour = year))

pgnorm <- read_csv("Climate-Comparisons/pgnorm.csv") %>%
  dplyr::select(-snowd,-rain) %>%
  filter(month != "Annual") %>%
  mutate(year = "Historical Normal")

pgnorm$month <- as.ordered(factor(pgnorm$month, levels = c("Jan", "Feb", "Mar", "Apr", "May", "Jun" ,"Jul", "Aug", "Sep", "Oct", "Nov", "Dec")))

pgclimate <- full_join(pgnorm,pgmonthly)

p1 <- pgclimate %>%
  ggplot()+
  geom_col(aes(month,precip,group = year, fill = year),position = "dodge")+
  labs(x = "Month", y = "Precipitation (mm)")+
  scale_fill_discrete(name = "Year")+
  theme(legend.position = c(.15,.75))

p2 <- pgclimate %>%
  ggplot()+
  geom_line(aes(month,temp,group =year, colour = year), size = .75)+
  geom_abline(slope = 0,intercept = 0, linetype = "dashed")+
  labs(x = "Month", y = "Air Temperature (°C)")+
  scale_colour_discrete(name = "Year")+
  theme(legend.position = c(.15,.75))

grid.arrange(p1,p2)

###########################################################################
# DOME MOUNTAIN DATA PLOTTING ---------------------------------------------

# SWE DATA WRANGLING----------------------------------------------------------------

domesw <- read_csv("Climate-Comparisons/other climate data/DataSetExport-SW.Telemetry@1A19P-20210412182208.csv",skip = 2) %>%
  rename(datetime = `Timestamp (UTC)`, swe.mm = `Value (Millimetres)`) %>%
  dplyr::select(datetime,swe.mm) %>%
  mutate(year = year(datetime), yday = yday(datetime), date = date(datetime)) %>%
  filter(swe.mm>=0 & datetime >= ymd_hms("2006-10-20 07:00:00")) %>%
  group_by(date) %>%
  mutate(daily.swe.mm = mean(swe.mm)) %>%
  ungroup() %>%
  group_by(yday) %>%
  mutate(mean.swe.plt = mean(swe.mm)) %>%
  filter(date >= ymd("2018-10-01")) %>%
  ungroup()

# Restrict to the two study water years and compute a water-year day index
# (wyday 0 = Oct 1).
domestudy <- domesw %>%
  filter(date >= ymd("2018-10-01") & date <= ymd("2020-09-30")) %>%
  mutate(wateryear = case_when(
    date >= ymd("2018-10-01") & date <= ymd("2019-09-30") ~ "2018/2019",
    date >= ymd("2019-10-01") ~ "2019/2020")) %>%
  mutate(wyday = case_when(
    yday >= 0 & yday < 274 ~ yday + 91,
    yday >=274 ~ yday-274))

# p <- domesw %>%
#   ggplot()+
#   geom_line(aes(x = date, y = daily.swe.mm))
# ggplotly(p)

domeswavg <- domestudy %>%
  select(wyday, wateryear, mean.swe.plt) %>%
  rename(swe = "mean.swe.plt") %>%
  mutate(wateryear = "16 Year Mean")

domeswdat <- domestudy %>%
  select(wyday, wateryear, daily.swe.mm) %>%
  rename(swe = "daily.swe.mm")

domeswe <- bind_rows(domeswavg,domeswdat)

domeswe$wateryear = factor(domeswe$wateryear, levels = c("2018/2019","2019/2020","16 Year Mean"))

# SWE PLOT ----------------------------------------------------------------

p1 <- domeswe %>%
  filter(wyday <= 340) %>%
  ggplot()+
  geom_line(aes(wyday, swe, colour = wateryear), size =1)+
  scale_x_continuous(breaks = c(0,31,61,92,123,151,182,213,242,273,304,334), labels = c("Oct","Nov","Dec","Jan","Feb","Mar","Apr","May","Jun","Jul", "Aug","Sep"), minor_breaks = NULL)+
  scale_colour_manual(name = "Water Year",values=c("#F8766D", "#00BFC4", "#999999"))+
  labs(x = "Month", y = "Snow Water Equivalent (mm)")+
  theme(legend.position = "none")

ggplotly(p1)

###############################################################################
# MONTHLY AIR TEMPERATURE LINE PLOT ---------------------------------------

dometa <- read_csv("Climate-Comparisons/other climate data/DataSetExport-TA.Working@1A19P-20210414221824.csv", skip = 2) %>%
  rename(datetime = `Timestamp (UTC)`, temp = `Value (Celsius)`) %>%
  select(datetime, temp) %>%
  mutate(date = date(datetime), yday = yday(date), month = month(date, label = T)) %>%
  group_by(month) %>%
  mutate(mn.temp = mean(temp,na.rm = T)) %>%
  ungroup() %>%
  filter(date >= ymd("2018-10-01")) %>%
  mutate(wateryear = case_when(
    date >= ymd("2018-10-01") & date <= ymd("2019-09-30") ~ "2018/2019",
    date >= ymd("2019-10-01") ~ "2019/2020",
    TRUE ~ "NA")) %>%
  group_by(wateryear,month) %>%
  mutate(mn.monthly.temp = mean(temp,na.rm=T))

p <- dometa %>%
  ggplot()+
  geom_line(aes(date,temp))
ggplotly(p)

dometa$month = factor(dometa$month, levels = c("Oct","Nov","Dec","Jan","Feb","Mar","Apr","May", "Jun","Jul", "Aug","Sep"))

dometavg <- dometa %>%
  select(wateryear, mn.temp) %>%
  rename(temp = "mn.temp") %>%
  mutate(wateryear = "14 Year Mean")

dometdat <- dometa %>%
  select(wateryear, mn.monthly.temp) %>%
  rename(temp = "mn.monthly.temp")

dometemp <- bind_rows(dometavg,dometdat)

# FIX: the mean rows are labelled "14 Year Mean" (see dometavg above), but the
# original levels vector listed "16 Year Mean", which coerced every mean row
# to NA and silently dropped the mean line from plot p2.
dometemp$wateryear = factor(dometemp$wateryear, levels = c("2018/2019","2019/2020","14 Year Mean"))

# TEMPERATURE PLOTTING ----------------------------------------------------

p2 <- dometemp %>%
  ggplot()+
  geom_line(aes(x = month, y = temp, colour = wateryear, group = wateryear), size =1)+
  geom_abline(slope = 0,intercept = 0, linetype = "dashed")+
  scale_colour_manual(name = "Water Year", values=c("#F8766D", "#00BFC4", "#999999"))+
  labs(x = "Month", y = "Air Temperature (°C)")+
  theme(legend.position = "none")

ggplotly(p2)

###########################################################################
# Precipitation Data Wrangle ----------------------------------------------

# NOTE(review): `year >= "2014" | year <= "2011"` compares numeric year
# against strings (lexicographic after coercion); it excludes 2012-2013.
# NOTE(review): the summarise() below returns multiple rows per group
# (non-aggregated columns); newer dplyr versions warn/error on this pattern.
domepc <- read_csv("Climate-Comparisons/other climate data/DataSetExport-PC.Working@1A19P-20210414154037.csv", skip = 2) %>%
  rename(datetime = `Timestamp (UTC)`, rain = `Value (Millimetres)`) %>%
  select(datetime,rain) %>%
  mutate(date = date(datetime), yday = yday(date), month = month(date, label = T), year = year(date)) %>%
  filter(date <= ymd("2020-09-30")) %>%
  mutate(wateryear = case_when(
    date >= ymd("2018-10-01") & date <= ymd("2019-09-30") ~ "2018/2019",
    date >= ymd("2019-10-01") ~ "2019/2020",
    TRUE ~ "NA")) %>%
  filter(year >= "2014" | year <= "2011") %>%
  group_by(date) %>%
  summarise(mn.daily.val = mean(rain,na.rm = T), date = date(datetime), yday = yday(date), month = month(date, label = T), year = year(date), wateryear = wateryear) %>%
  ungroup() %>%
  mutate(val = mn.daily.val - lag(mn.daily.val, 1), val = case_when(
    val < 0 ~ 0,
    TRUE ~ val
  )) %>%
  ungroup() %>%
  group_by(month,year) %>%
  mutate(sm.month = sum(val, na.rm = T)) %>%
  ungroup() %>%
  group_by(month) %>%
  mutate(mn.month = mean(sm.month, na.rm = T)) %>%
  select(year,month,sm.month,mn.month,wateryear) %>%
  filter(wateryear != "NA") %>%
  distinct()

domeavg <- domepc %>%
  select(month, mn.month) %>%
  rename(mm.precip = "mn.month") %>%
  mutate(wateryear = "14 Year Mean")

domeyrs <- domepc %>%
  select(-mn.month,-year) %>%
  rename(mm.precip = "sm.month") %>%
  distinct()

domepc <- bind_rows(domeavg,domeyrs) %>%
  distinct()

domepc$month = factor(domepc$month, levels = c("Oct","Nov","Dec","Jan","Feb","Mar","Apr","May", "Jun","Jul", "Aug","Sep"))
domepc$wateryear = factor(domepc$wateryear, levels = c("2018/2019","2019/2020","14 Year Mean"))

# Precipitation Column Plot -----------------------------------------------

p3 <- domepc %>%
  ggplot()+
  geom_col(aes(month, mm.precip, fill = wateryear), position = "dodge")+
  scale_fill_manual(values=c("#F8766D", "#00BFC4", "#999999"), name = "Water Year")+
  labs(x = "Month", y = "Precipitation (mm)")+
  theme(legend.position = c(.92,.88), legend.title = element_text(size = 10), legend.text = element_text(size = 8))

ggplotly(p3)

# Precipitation Ratios

precip.ratio <- domepc %>%
  mutate(wateryear = case_when(
    wateryear == "14 Year Mean" ~ "mn",
    wateryear == "2018/2019" ~ "wy1819",
    wateryear == "2019/2020" ~ "wy1920"
  ))

precip.ratio <- precip.ratio%>%
  pivot_wider(names_from = wateryear, values_from = mm.precip)

precip.ratio <- precip.ratio %>%
  mutate(rto1819 = 100*wy1819/mn, rto1920 = 100*wy1920/mn) %>%
  ungroup()

totals <- precip.ratio %>%
  summarise(month = as.character("Precipitation"), mn = sum(mn), wy1819 = sum(wy1819), wy1920 = sum(wy1920), rto1819 = 100*wy1819/mn, rto1920 = 100*wy1920/mn)

# Hard-coded totals below were derived outside this script — TODO confirm source.
total.rn <- precip.ratio %>%
  filter(month %in% c("May","Jun","Jul","Aug","Sep")) %>%
  summarise(month = as.character("Rain"), mn = 1422.4-864.4, wy1819 = 1463.0-762.8, wy1920 = 1759.2-1082.8, rto1819 = 100*wy1819/mn, rto1920 = 100*wy1920/mn)

total.sn <- precip.ratio %>%
  summarise(month = as.character("Snow"), mn = 864.4, wy1819 = 762.8, wy1920 = 1082.8, rto1819 = 100*wy1819/mn, rto1920 = 100*wy1920/mn)

totals$month <- as.character(totals$month)

precip.summary <- bind_rows(totals, total.rn,total.sn) %>%
  select(month,mn, wy1819,rto1819,wy1920,rto1920)

# precip.summary$month <- factor(precip.summary$month, levels = c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec","Total", "Total Rain","Total Snow"))

precip.summary$month <- factor(precip.summary$month, levels = c("Rain","Snow","Precipitation"))

precip.summary <- precip.summary %>%
  arrange(month)

# Precip ratio table

table <- kableExtra::kbl(precip.summary,digits = 1, col.names = c("-","(mm)", "(mm)", "% Mean", "(mm)", "% Mean"),align = "c",)%>%
  kable_styling(bootstrap_options = c("striped", "hover"),font_size = 12) %>%
  add_header_above(c("Totals","14 Year Mean", "2018/2019 " = 2, "2019/2020 " = 2), align = "center") %>%
  column_spec(column = c(1,2,4),border_right = T)

kableExtra::as_image(table, width = 6.5,file = "DomePrecip.png")

snow2rain <- bind_rows(total.rn,total.sn)

snow2raintot <- snow2rain %>%
  summarise(month = "Rain:Snow", mn = 558/864, wy1819 = 700/763, wy1920 = 676/1083)

# PLOT THE GRID -----------------------------------------------------------

DomeGrid <- grid.arrange(p1,p3,p2)

ggsave(plot = DomeGrid, filename = "DomeGrid.png", device = "png", width = 6.5,height = 7.2, units = "in")

#
#
#
# domeswe <- domesw %>%
#   select(-mean.swe.plt) %>%
#   rename(value = "swe.mm") %>%
#   mutate(name = "swe")
#
# domedat <- bind_rows(domeswe,domepc,dometa) %>%
#   filter(date >= "2019-01-01")
#
#
# p <- domedat %>%
#   ggplot(aes(x = date, y = value))+
#   geom_col(data = filter(.data = domedat, name == "rain"))+
#   geom_line(data = filter(.data = domedat, name == "swe"))+
#   geom_line(data = filter(.data = domedat, name == "temp"))+
#   facet_grid(rows = vars(name), scales = "free")
# ggplotly(p)
#
# domefinal <- domedat %>%
#   rename(Parameter = "name") %>%
#   select(date,value,Parameter) %>%
#   mutate(Site = "Dome")
#
# write_csv(domefinal,"Climate-Comparisons/DomeMtn.csv" )
/Climate-Comparisons/Climate-Comparisons.R
no_license
Jhydromet/Ancient-Forest_Wetland-Hydrology
R
false
false
11,766
r
library(tidyverse) library(lubridate) library(gridExtra) library(plotly) library(roll) library(magick) library(kableExtra) # Prince George Airport Plot ---------------------------------------------- # Comparing 2019, 2020 with 1989-2010 climate normals pgdat <- paste0("Climate-Comparisons/pg_daily_data/", dir(path = "Climate-Comparisons/pg_daily_data/")) pgdat <- pgdat %>% map(read_csv) %>% reduce(full_join) pgthin <- pgdat %>% dplyr::rename(date = `Date/Time`, temp = `Mean Temp (°C)`, precip = `Total Precip (mm)`) %>% dplyr::select(date,temp,precip) pgmonthly <- pgthin %>% mutate(year = as.factor(year(date)), month = month(date,label = T, abbr = T)) %>% group_by(year,month) %>% summarise(temp = mean(temp,na.rm=T), precip = sum(precip, na.rm = T)) pgmonthly %>% ggplot()+ geom_line(aes(month,precip,group = year, colour = year)) pgnorm <- read_csv("Climate-Comparisons/pgnorm.csv") %>% dplyr::select(-snowd,-rain) %>% filter(month != "Annual") %>% mutate(year = "Historical Normal") pgnorm$month <- as.ordered(factor(pgnorm$month, levels = c("Jan", "Feb", "Mar", "Apr", "May", "Jun" ,"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"))) pgclimate <- full_join(pgnorm,pgmonthly) p1 <- pgclimate %>% ggplot()+ geom_col(aes(month,precip,group = year, fill = year),position = "dodge")+ labs(x = "Month", y = "Precipitation (mm)")+ scale_fill_discrete(name = "Year")+ theme(legend.position = c(.15,.75)) p2 <- pgclimate %>% ggplot()+ geom_line(aes(month,temp,group =year, colour = year), size = .75)+ geom_abline(slope = 0,intercept = 0, linetype = "dashed")+ labs(x = "Month", y = "Air Temperature (°C)")+ scale_colour_discrete(name = "Year")+ theme(legend.position = c(.15,.75)) grid.arrange(p1,p2) ########################################################################### # DOME MOUNTAIN DATA PLOTTING --------------------------------------------- # SWE DATA WRANGLING---------------------------------------------------------------- domesw <- read_csv("Climate-Comparisons/other climate 
data/DataSetExport-SW.Telemetry@1A19P-20210412182208.csv",skip = 2) %>% rename(datetime = `Timestamp (UTC)`, swe.mm = `Value (Millimetres)`) %>% dplyr::select(datetime,swe.mm) %>% mutate(year = year(datetime), yday = yday(datetime), date = date(datetime)) %>% filter(swe.mm>=0 & datetime >= ymd_hms("2006-10-20 07:00:00")) %>% group_by(date) %>% mutate(daily.swe.mm = mean(swe.mm)) %>% ungroup() %>% group_by(yday) %>% mutate(mean.swe.plt = mean(swe.mm)) %>% filter(date >= ymd("2018-10-01")) %>% ungroup() domestudy <- domesw %>% filter(date >= ymd("2018-10-01") & date <= ymd("2020-09-30")) %>% mutate(wateryear = case_when( date >= ymd("2018-10-01") & date <= ymd("2019-09-30") ~ "2018/2019", date >= ymd("2019-10-01") ~ "2019/2020")) %>% mutate(wyday = case_when( yday >= 0 & yday < 274 ~ yday + 91, yday >=274 ~ yday-274)) # p <- domesw %>% # ggplot()+ # geom_line(aes(x = date, y = daily.swe.mm)) # ggplotly(p) domeswavg <- domestudy %>% select(wyday, wateryear, mean.swe.plt) %>% rename(swe = "mean.swe.plt") %>% mutate(wateryear = "16 Year Mean") domeswdat <- domestudy %>% select(wyday, wateryear, daily.swe.mm) %>% rename(swe = "daily.swe.mm") domeswe <- bind_rows(domeswavg,domeswdat) domeswe$wateryear = factor(domeswe$wateryear, levels = c("2018/2019","2019/2020","16 Year Mean")) # SWE PLOT ---------------------------------------------------------------- p1 <- domeswe %>% filter(wyday <= 340) %>% ggplot()+ geom_line(aes(wyday, swe, colour = wateryear), size =1)+ scale_x_continuous(breaks = c(0,31,61,92,123,151,182,213,242,273,304,334), labels = c("Oct","Nov","Dec","Jan","Feb","Mar","Apr","May","Jun","Jul", "Aug","Sep"), minor_breaks = NULL)+ scale_colour_manual(name = "Water Year",values=c("#F8766D", "#00BFC4", "#999999"))+ labs(x = "Month", y = "Snow Water Equivalent (mm)")+ theme(legend.position = "none") ggplotly(p1) ############################################################################### # MONTHLY AIR TEMPERATURE LINE PLOT 
--------------------------------------- dometa <- read_csv("Climate-Comparisons/other climate data/DataSetExport-TA.Working@1A19P-20210414221824.csv", skip = 2) %>% rename(datetime = `Timestamp (UTC)`, temp = `Value (Celsius)`) %>% select(datetime, temp) %>% mutate(date = date(datetime), yday = yday(date), month = month(date, label = T)) %>% group_by(month) %>% mutate(mn.temp = mean(temp,na.rm = T)) %>% ungroup() %>% filter(date >= ymd("2018-10-01")) %>% mutate(wateryear = case_when( date >= ymd("2018-10-01") & date <= ymd("2019-09-30") ~ "2018/2019", date >= ymd("2019-10-01") ~ "2019/2020", TRUE ~ "NA")) %>% group_by(wateryear,month) %>% mutate(mn.monthly.temp = mean(temp,na.rm=T)) p <- dometa %>% ggplot()+ geom_line(aes(date,temp)) ggplotly(p) dometa$month = factor(dometa$month, levels = c("Oct","Nov","Dec","Jan","Feb","Mar","Apr","May", "Jun","Jul", "Aug","Sep")) dometavg <- dometa %>% select(wateryear, mn.temp) %>% rename(temp = "mn.temp") %>% mutate(wateryear = "14 Year Mean") dometdat <- dometa %>% select(wateryear, mn.monthly.temp) %>% rename(temp = "mn.monthly.temp") dometemp <- bind_rows(dometavg,dometdat) dometemp$wateryear = factor(dometemp$wateryear, levels = c("2018/2019","2019/2020","16 Year Mean")) # TEMPERATURE PLOTTING ---------------------------------------------------- p2 <- dometemp %>% ggplot()+ geom_line(aes(x = month, y = temp, colour = wateryear, group = wateryear), size =1)+ geom_abline(slope = 0,intercept = 0, linetype = "dashed")+ scale_colour_manual(name = "Water Year", values=c("#F8766D", "#00BFC4", "#999999"))+ labs(x = "Month", y = "Air Temperature (°C)")+ theme(legend.position = "none") ggplotly(p2) ########################################################################### # Precipitation Data Wrangle ---------------------------------------------- domepc <- read_csv("Climate-Comparisons/other climate data/DataSetExport-PC.Working@1A19P-20210414154037.csv", skip = 2) %>% rename(datetime = `Timestamp (UTC)`, rain = `Value 
(Millimetres)`) %>% select(datetime,rain) %>% mutate(date = date(datetime), yday = yday(date), month = month(date, label = T), year = year(date)) %>% filter(date <= ymd("2020-09-30")) %>% mutate(wateryear = case_when( date >= ymd("2018-10-01") & date <= ymd("2019-09-30") ~ "2018/2019", date >= ymd("2019-10-01") ~ "2019/2020", TRUE ~ "NA")) %>% filter(year >= "2014" | year <= "2011") %>% group_by(date) %>% summarise(mn.daily.val = mean(rain,na.rm = T), date = date(datetime), yday = yday(date), month = month(date, label = T), year = year(date), wateryear = wateryear) %>% ungroup() %>% mutate(val = mn.daily.val - lag(mn.daily.val, 1), val = case_when( val < 0 ~ 0, TRUE ~ val )) %>% ungroup() %>% group_by(month,year) %>% mutate(sm.month = sum(val, na.rm = T)) %>% ungroup() %>% group_by(month) %>% mutate(mn.month = mean(sm.month, na.rm = T)) %>% select(year,month,sm.month,mn.month,wateryear) %>% filter(wateryear != "NA") %>% distinct() domeavg <- domepc %>% select(month, mn.month) %>% rename(mm.precip = "mn.month") %>% mutate(wateryear = "14 Year Mean") domeyrs <- domepc %>% select(-mn.month,-year) %>% rename(mm.precip = "sm.month") %>% distinct() domepc <- bind_rows(domeavg,domeyrs) %>% distinct() domepc$month = factor(domepc$month, levels = c("Oct","Nov","Dec","Jan","Feb","Mar","Apr","May", "Jun","Jul", "Aug","Sep")) domepc$wateryear = factor(domepc$wateryear, levels = c("2018/2019","2019/2020","14 Year Mean")) # Precipitation Column Plot ----------------------------------------------- p3 <- domepc %>% ggplot()+ geom_col(aes(month, mm.precip, fill = wateryear), position = "dodge")+ scale_fill_manual(values=c("#F8766D", "#00BFC4", "#999999"), name = "Water Year")+ labs(x = "Month", y = "Precipitation (mm)")+ theme(legend.position = c(.92,.88), legend.title = element_text(size = 10), legend.text = element_text(size = 8)) ggplotly(p3) # Precipitation Ratios precip.ratio <- domepc %>% mutate(wateryear = case_when( wateryear == "14 Year Mean" ~ "mn", wateryear == 
"2018/2019" ~ "wy1819", wateryear == "2019/2020" ~ "wy1920" )) precip.ratio <- precip.ratio%>% pivot_wider(names_from = wateryear, values_from = mm.precip) precip.ratio <- precip.ratio %>% mutate(rto1819 = 100*wy1819/mn, rto1920 = 100*wy1920/mn) %>% ungroup() totals <- precip.ratio %>% summarise(month = as.character("Precipitation"), mn = sum(mn), wy1819 = sum(wy1819), wy1920 = sum(wy1920), rto1819 = 100*wy1819/mn, rto1920 = 100*wy1920/mn) total.rn <- precip.ratio %>% filter(month %in% c("May","Jun","Jul","Aug","Sep")) %>% summarise(month = as.character("Rain"), mn = 1422.4-864.4, wy1819 = 1463.0-762.8, wy1920 = 1759.2-1082.8, rto1819 = 100*wy1819/mn, rto1920 = 100*wy1920/mn) total.sn <- precip.ratio %>% summarise(month = as.character("Snow"), mn = 864.4, wy1819 = 762.8, wy1920 = 1082.8, rto1819 = 100*wy1819/mn, rto1920 = 100*wy1920/mn) totals$month <- as.character(totals$month) precip.summary <- bind_rows(totals, total.rn,total.sn) %>% select(month,mn, wy1819,rto1819,wy1920,rto1920) # precip.summary$month <- factor(precip.summary$month, levels = c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec","Total", "Total Rain","Total Snow")) precip.summary$month <- factor(precip.summary$month, levels = c("Rain","Snow","Precipitation")) precip.summary <- precip.summary %>% arrange(month) # Precip ratio table table <- kableExtra::kbl(precip.summary,digits = 1, col.names = c("-","(mm)", "(mm)", "% Mean", "(mm)", "% Mean"),align = "c",)%>% kable_styling(bootstrap_options = c("striped", "hover"),font_size = 12) %>% add_header_above(c("Totals","14 Year Mean", "2018/2019 " = 2, "2019/2020 " = 2), align = "center") %>% column_spec(column = c(1,2,4),border_right = T) kableExtra::as_image(table, width = 6.5,file = "DomePrecip.png") snow2rain <- bind_rows(total.rn,total.sn) snow2raintot <- snow2rain %>% summarise(month = "Rain:Snow", mn = 558/864, wy1819 = 700/763, wy1920 = 676/1083) # PLOT THE GRID ----------------------------------------------------------- 
DomeGrid <- grid.arrange(p1,p3,p2) ggsave(plot = DomeGrid, filename = "DomeGrid.png", device = "png", width = 6.5,height = 7.2, units = "in") # # # # domeswe <- domesw %>% # select(-mean.swe.plt) %>% # rename(value = "swe.mm") %>% # mutate(name = "swe") # # domedat <- bind_rows(domeswe,domepc,dometa) %>% # filter(date >= "2019-01-01") # # # p <- domedat %>% # ggplot(aes(x = date, y = value))+ # geom_col(data = filter(.data = domedat, name == "rain"))+ # geom_line(data = filter(.data = domedat, name == "swe"))+ # geom_line(data = filter(.data = domedat, name == "temp"))+ # facet_grid(rows = vars(name), scales = "free") # ggplotly(p) # # domefinal <- domedat %>% # rename(Parameter = "name") %>% # select(date,value,Parameter) %>% # mutate(Site = "Dome") # # write_csv(domefinal,"Climate-Comparisons/DomeMtn.csv" )
#' The 'stanmd' package.
#'
#' @description A DESCRIPTION OF THE PACKAGE
#'
#' @docType package
#' @name stanmd-package
#' @aliases stanmd
#' @useDynLib stanmd, .registration = TRUE
#' @import methods
#' @import Rcpp
#' @importFrom rstan sampling
#'
#' @references
#' Stan Development Team (2019). RStan: the R interface to Stan. R package version 2.19.2. https://mc-stan.org
#'
# NOTE(review): @description still carries the rstantools template placeholder
# ("A DESCRIPTION OF THE PACKAGE") — fill in before release. Newer roxygen2
# versions prefer documenting the package via the "_PACKAGE" sentinel instead
# of @docType package — confirm against the roxygen2 version in use.
NULL
/R/stanmd-package.R
no_license
medewitt/stanmd
R
false
false
384
r
#' The 'stanmd' package. #' #' @description A DESCRIPTION OF THE PACKAGE #' #' @docType package #' @name stanmd-package #' @aliases stanmd #' @useDynLib stanmd, .registration = TRUE #' @import methods #' @import Rcpp #' @importFrom rstan sampling #' #' @references #' Stan Development Team (2019). RStan: the R interface to Stan. R package version 2.19.2. https://mc-stan.org #' NULL
#' PheVis simulated dataset
#'
#' Simulated dataset for PheVis phenotyping.
#'
#' @docType data
#'
#' @usage data(data_phevis)
#'
#' @keywords datasets
# Roxygen data documentation: the quoted string below names the dataset object
# being documented; it is not executable code beyond evaluating to a string.
"data_phevis"
/R/data_phevis.R
no_license
cran/PheVis
R
false
false
165
r
#' PheVis simulated dataset #' #' Simulated dataset for PheVis phenotyping. #' #' @docType data #' #' @usage data(data_phevis) #' #' @keywords datasets "data_phevis"
#' @title Step 5 - Build Phylogenetic Tree
#'
#' @description The phangorn R package is then used to construct a phylogenetic
#' tree. First, a neighbor-joining tree is constructed and then fit to a GTR+G+I
#' maximum likelihood tree using the neighbor-joining tree as a starting point.
#'
#' @import dada2 DECIPHER phangorn
#'
#' @export
###Build phylogenetic tree###
# NOTE(review): this function takes no arguments and reads `seqtab` from the
# global environment (created by an earlier pipeline step); all intermediate
# results (seqs, alignment, phang.align, dm, treeNJ, fit, fitGTR) are written
# back to the global environment via `<<-`. Consider passing `seqtab` as a
# parameter and returning fitGTR instead.
build_phylo_tree <- function(){
  seqs <<- dada2::getSequences(seqtab)
  names(seqs) <<- seqs # This propagates to the tip labels of the tree
  # Multiple sequence alignment of the unique ASV sequences.
  alignment <<- DECIPHER::AlignSeqs(Biostrings::DNAStringSet(seqs), anchor=NA)
  phang.align <<- phangorn::phyDat(as(alignment, "matrix"), type="DNA")
  # Maximum-likelihood distances, then a neighbor-joining starting tree.
  dm <<- phangorn::dist.ml(phang.align)
  treeNJ <<- phangorn::NJ(dm) # Note, tip order != sequence order
  fit <<- phangorn::pml(treeNJ, data=phang.align)
  # Refit under GTR + Gamma(k=4) + invariant sites, with stochastic
  # rearrangements; trace suppressed.
  fitGTR <- update(fit, k=4, inv=0.2)
  fitGTR <<- phangorn::optim.pml(fitGTR, model="GTR", optInv=TRUE, optGamma=TRUE, rearrangement = "stochastic", control = phangorn::pml.control(trace = 0))
}
/R/5_buildphylotree.R
no_license
seanstuntz/BAW
R
false
false
1,038
r
#' @title Step 5 - Build Phylogenetic Tree #' #' @description The phangorn R package is then used to construct a phylogenetic #' tree. First, a neighbor-joining tree is constructed and then fit to a GTR+G+I #' maximum likelihood tree using the neighbor-joining tree as a starting point. #' #' @import dada2 DECIPHER phangorn #' #' @export ###Build phylogenetic tree### build_phylo_tree <- function(){ seqs <<- dada2::getSequences(seqtab) names(seqs) <<- seqs # This propagates to the tip labels of the tree alignment <<- DECIPHER::AlignSeqs(Biostrings::DNAStringSet(seqs), anchor=NA) phang.align <<- phangorn::phyDat(as(alignment, "matrix"), type="DNA") dm <<- phangorn::dist.ml(phang.align) treeNJ <<- phangorn::NJ(dm) # Note, tip order != sequence order fit <<- phangorn::pml(treeNJ, data=phang.align) fitGTR <- update(fit, k=4, inv=0.2) fitGTR <<- phangorn::optim.pml(fitGTR, model="GTR", optInv=TRUE, optGamma=TRUE, rearrangement = "stochastic", control = phangorn::pml.control(trace = 0)) }
# Batch script: genome-wide two-stage score tests (ERPRHER2, fixed effects)
# for the iCOGS imputed genotype data. Intended to run on a cluster, one
# genotype file per job; the file index is taken from the command line.
# Writes a per-file result list (.Rdata) at the end.
# NOTE(review): rm(list=ls()) / setwd() at script top are kept for pipeline
# compatibility but are discouraged in reusable code.
rm(list=ls())
#args=(commandArgs(TRUE))
#for(p in 1:length(args)){
#   eval(parse(text=args[[p]]))
#  }
#print(i)
#i1 <- i
#commandarg <- commandArgs(trailingOnly=F)
#myarg <- commandarg[length(commandarg)]
#myarg <- sub("-","",myarg)
#i <- as.numeric(myarg)
#print(i)
#pheno is ICOGS,data2 is onco_array
arg <- commandArgs(trailingOnly=T)
i <- as.numeric(arg[[1]])
i1 <- i
print(i)
library(R.utils)
setwd("/data/zhangh24/breast_cancer_data_analysis/")
n <- 109713
snpvalue <- rep(0,n)
subject.file <- "/gpfs/gsfs4/users/NC_BW/icogs_onco/genotype/imputed2/icogs_order.txt.gz"
Icog.order <- read.table(gzfile(subject.file))
pheno.file <- "./data/pheno.Icog"
load(pheno.file)
n.sub = nrow(pheno)
# Outcome matrix: case/control plus three tumor markers (888 = missing marker).
y.pheno.mis1 <- cbind(pheno$Behaviour1,pheno$PR_status1,pheno$ER_status1,pheno$HER2_status1)
colnames(y.pheno.mis1) = c("Behaviour1","PR_status1", "ER_status1","HER2_status1")
# Align genotype subject order to the phenotype file by SG_ID.
idx.fil <- Icog.order[,1]%in%pheno$SG_ID
idx.match <- match(pheno$SG_ID,Icog.order[idx.fil,1])
#Icog.order.match <- Icog.order[idx.fil,1][idx.match]
library(bc2)
load("./whole_genome/ICOG/ERPRHER2_fixed/result/score.test.support.icog.ERPRHER2.Rdata")
Filesdir <- "/gpfs/gsfs4/users/NC_BW/icogs_onco/genotype/imputed2/icogs_imputed/"
Files <- dir(Filesdir,pattern="icogs_merged_b1_12.",full.names=T)
# Drop chromosome X (chr23) files.
Filesex <- dir(Filesdir,pattern="icogs_merged_b1_12.chr23",full.names=T)
idx.sex <- Files%in%Filesex
Files <- Files[!idx.sex]
geno.file <- Files[i1]
# Count lines (= SNPs) in the gzipped genotype file; fall back to
# R.utils::countLines if the shell pipeline fails.
# FIX: the original wrapped an assignment in tryCatch and re-assigned `num`
# inside the error handler's own environment, so on error `num` was never
# defined in the script. Assigning the tryCatch result fixes this.
num <- tryCatch(
  as.integer(system(paste0("zcat ",geno.file,"| wc -l"),intern=T)),
  error=function(cond){
    countLines(geno.file)[1]
  }
)
#num = 22349
#num <- countLines(geno.file)[1];
#num <- as.integer(system(paste0("zcat ",geno.file,"| wc -l"),intern=T))
num.of.tumor <- ncol(y.pheno.mis1)-1
# Preallocate result containers (zeros double as the "skipped SNP" value).
score_result <- matrix(0,num,num.of.tumor+1)
infor_result <- matrix(0,(num.of.tumor+1)*num,num.of.tumor+1)
snpid_result <- rep("c",num)
freq.all <- rep(0,num)
con <- gzfile(geno.file)
open(con)
for(i in 1:num){
  if(i%%500==0){
    print(i)
  }
  # Each line: metadata fields 1-5, then 3 genotype probabilities per subject.
  oneLine <- readLines(con,n=1)
  myVector <- strsplit(oneLine," ")
  snpid <- as.character(myVector[[1]][2])
  snpid_result[i] <- snpid
  snpvalue <- rep(0,n)
  snppro <- as.numeric(unlist(myVector)[6:length(myVector[[1]])])
  if(length(snppro)!=(3*n)){
    break
  }
  # Convert probability triplets to expected dosages, then reorder to pheno.
  snpvalue <- convert(snppro,n)
  snpvalue <- snpvalue[idx.fil][idx.match]
  freq <- sum(snpvalue)/(2*n.sub)
  freq.all[i] <- freq
  #print(paste0("freq",freq))
  tryCatch(
    {
      if(freq<0.005|freq>0.995){
        # Rare/monomorphic SNP: leave the preallocated zero rows.
        score_result[i,] <- 0
        infor_result[((num.of.tumor+1)*i-(num.of.tumor)):((num.of.tumor+1)*i),] <- 0
      }else{
        score.test.icog<- ScoreTest(y=y.pheno.mis1, x=snpvalue, second.stage.structure="additive", score.test.support=score.test.support.icog.ERPRHER2, missingTumorIndicator=888)
        score_result[i,] <- score.test.icog[[1]]
        infor_result[((num.of.tumor+1)*i-(num.of.tumor)):((num.of.tumor+1)*i),] <- score.test.icog[[2]]
      }
    },
    error=function(cond) {
      # NOTE(review): these handler assignments modify handler-local copies
      # and have no effect; harmless only because the matrices are
      # preallocated to 0, which is the intended on-error value.
      score_result[i,] <- 0
      infor_result[((num.of.tumor+1)*i-(num.of.tumor)):((num.of.tumor+1)*i),] <- 0
    })
}
close(con)
# If the loop broke early (malformed line), trim results to processed rows.
if(i !=num){
  snpid_result <- snpid_result[1:i]
  score_result <- score_result[1:i,]
  infor_result <- infor_result[1:((num.of.tumor+1)*i),]
}
result <- list(snpid_reuslt=snpid_result,score_result=score_result,infor_result=infor_result,freq.all=freq.all)
save(result,file=paste0("./whole_genome/ICOG/ERPRHER2_fixed/result/ERPRHER2_fixed.Rdata",i1))
/whole_genome/ICOG/ERPRHER2_fixed/code/whole_genome_ERPRHER2_fixed.R
no_license
andrewhaoyu/breast_cancer_data_analysis
R
false
false
3,701
r
rm(list=ls()) #args=(commandArgs(TRUE)) #for(p in 1:length(args)){ # eval(parse(text=args[[p]])) # } #print(i) #i1 <- i #commandarg <- commandArgs(trailingOnly=F) #myarg <- commandarg[length(commandarg)] #myarg <- sub("-","",myarg) #i <- as.numeric(myarg) #print(i) #pheno is ICOGS,data2 is onco_array arg <- commandArgs(trailingOnly=T) i <- as.numeric(arg[[1]]) i1 <- i print(i) library(R.utils) setwd("/data/zhangh24/breast_cancer_data_analysis/") n <- 109713 snpvalue <- rep(0,n) subject.file <- "/gpfs/gsfs4/users/NC_BW/icogs_onco/genotype/imputed2/icogs_order.txt.gz" Icog.order <- read.table(gzfile(subject.file)) pheno.file <- "./data/pheno.Icog" load(pheno.file) n.sub = nrow(pheno) y.pheno.mis1 <- cbind(pheno$Behaviour1,pheno$PR_status1,pheno$ER_status1,pheno$HER2_status1) colnames(y.pheno.mis1) = c("Behaviour1","PR_status1", "ER_status1","HER2_status1") idx.fil <- Icog.order[,1]%in%pheno$SG_ID idx.match <- match(pheno$SG_ID,Icog.order[idx.fil,1]) #Icog.order.match <- Icog.order[idx.fil,1][idx.match] library(bc2) load("./whole_genome/ICOG/ERPRHER2_fixed/result/score.test.support.icog.ERPRHER2.Rdata") Filesdir <- "/gpfs/gsfs4/users/NC_BW/icogs_onco/genotype/imputed2/icogs_imputed/" Files <- dir(Filesdir,pattern="icogs_merged_b1_12.",full.names=T) Filesex <- dir(Filesdir,pattern="icogs_merged_b1_12.chr23",full.names=T) idx.sex <- Files%in%Filesex Files <- Files[!idx.sex] geno.file <- Files[i1] tryCatch( { num <- as.integer(system(paste0("zcat ",geno.file,"| wc -l"),intern=T)) }, error=function(cond){ num <- countLines(geno.file)[1] } ) #num = 22349 #num <- countLines(geno.file)[1]; #num <- as.integer(system(paste0("zcat ",geno.file,"| wc -l"),intern=T)) num.of.tumor <- ncol(y.pheno.mis1)-1 score_result <- matrix(0,num,num.of.tumor+1) infor_result <- matrix(0,(num.of.tumor+1)*num,num.of.tumor+1) snpid_result <- rep("c",num) freq.all <- rep(0,num) con <- gzfile(geno.file) open(con) for(i in 1:num){ if(i%%500==0){ print(i) } oneLine <- readLines(con,n=1) myVector <- 
strsplit(oneLine," ") snpid <- as.character(myVector[[1]][2]) snpid_result[i] <- snpid snpvalue <- rep(0,n) snppro <- as.numeric(unlist(myVector)[6:length(myVector[[1]])]) if(length(snppro)!=(3*n)){ break } snpvalue <- convert(snppro,n) snpvalue <- snpvalue[idx.fil][idx.match] freq <- sum(snpvalue)/(2*n.sub) freq.all[i] <- freq #print(paste0("freq",freq)) tryCatch( { if(freq<0.005|freq>0.995){ score_result[i,] <- 0 infor_result[((num.of.tumor+1)*i-(num.of.tumor)):((num.of.tumor+1)*i),] <- 0 }else{ score.test.icog<- ScoreTest(y=y.pheno.mis1, x=snpvalue, second.stage.structure="additive", score.test.support=score.test.support.icog.ERPRHER2, missingTumorIndicator=888) score_result[i,] <- score.test.icog[[1]] infor_result[((num.of.tumor+1)*i-(num.of.tumor)):((num.of.tumor+1)*i),] <- score.test.icog[[2]] } }, error=function(cond) { score_result[i,] <- 0 infor_result[((num.of.tumor+1)*i-(num.of.tumor)):((num.of.tumor+1)*i),] <- 0 }) } close(con) if(i !=num){ snpid_result <- snpid_result[1:i] score_result <- score_result[1:i,] infor_result <- infor_result[1:((num.of.tumor+1)*i),] } result <- list(snpid_reuslt=snpid_result,score_result=score_result,infor_result=infor_result,freq.all=freq.all) save(result,file=paste0("./whole_genome/ICOG/ERPRHER2_fixed/result/ERPRHER2_fixed.Rdata",i1))
setwd('/local/home/shrestha/Documents/Thesis/counqer/classifier_crowd_annotations') library(dplyr) ############### Counting results <- read.csv('counting/figure-eight/f1411878.csv') results.minus.gold <- results[which(results$X_golden == "false"), -c(16:20)] disabled_ids <- c(2393883419, 2398349110, 2393883423, 2393883422, 2393883421, 2393883420, 2393883418, 2393883417, 2393883416, 2393883415, 2393883411, 2393883410, 2393883409, 2393883408, 2393883407, 2393883405, 2393883404, 2393883403, 2393883402, 2393883401, 2393883400, 2393883399, 2393883398) results.minus.gold <- results.minus.gold[which(!(results.minus.gold$X_unit_id %in% disabled_ids)),] results.grouped <- results.minus.gold %>% group_by(predicate, p_label) %>% summarise(judgements = n(), yes = sum(does_the_relation_give_a_count_of_unique_entities == 'yes'), maybe_yes = sum(does_the_relation_give_a_count_of_unique_entities == 'maybe_yes'), maybe_no = sum(does_the_relation_give_a_count_of_unique_entities == 'maybe_no'), no = sum(does_the_relation_give_a_count_of_unique_entities == 'no'), do_not_know = sum(does_the_relation_give_a_count_of_unique_entities == 'do_not_know')) fb_p_labels <- read.csv('counting/p_labels_fb.csv') fb_p_labels[] <- lapply(fb_p_labels, function(x) if (is.factor(x)) as.character(x) else {x}) results.grouped[] <- lapply(results.grouped, function(x) if (is.factor(x)) as.character(x) else {x}) results.grouped$p_label[results.grouped$predicate %in% fb_p_labels$predicate] <- inner_join(results.grouped[results.grouped$predicate %in% fb_p_labels$predicate,-c(2)], fb_p_labels[fb_p_labels$predicate %in% results.grouped$predicate,], by='predicate')$p_label results.grouped$final <- ifelse((results.grouped$yes+results.grouped$maybe_yes > results.grouped$no+results.grouped$maybe_no),1, ifelse((results.grouped$no+results.grouped$maybe_no > results.grouped$yes+results.grouped$maybe_yes), 0,0.5)) write.csv(results.grouped, 'counting/figure-eight/f1411878_grouped.csv', row.names = F) ############### 
Enumerating results <- read.csv('enumerating/figure-eight/f1413990.csv') results.minus.gold <- results[which(results$X_golden == 'false'), ] disabled_ids <- c(2401090584, 2401090602, 2401090608, 2401090611, 2401090613) results.minus.gold <- results.minus.gold[which(!(results.minus.gold$X_unit_id %in% disabled_ids)),] results.grouped <- results.minus.gold %>% group_by(predicate, p_label) %>% summarise(judgements = n(), yes = sum(does_the_relation_enumerate_entities == 'yes'), maybe_yes = sum(does_the_relation_enumerate_entities == 'maybe_yes'), maybe_no = sum(does_the_relation_enumerate_entities == 'maybe_no'), no = sum(does_the_relation_enumerate_entities == 'no'), do_not_know = sum(does_the_relation_enumerate_entities == 'do_not_know')) results.grouped$final <- ifelse((results.grouped$yes+results.grouped$maybe_yes > results.grouped$no+results.grouped$maybe_no),1, ifelse((results.grouped$no+results.grouped$maybe_no > results.grouped$yes+results.grouped$maybe_yes), 0,0.5)) write.csv(results.grouped, 'enumerating/figure-eight/f1413990_grouped.csv', row.names = F)
/classifier_crowd_annotations/view_crowd_results.R
no_license
ghoshs/counqer_v1
R
false
false
3,530
r
setwd('/local/home/shrestha/Documents/Thesis/counqer/classifier_crowd_annotations') library(dplyr) ############### Counting results <- read.csv('counting/figure-eight/f1411878.csv') results.minus.gold <- results[which(results$X_golden == "false"), -c(16:20)] disabled_ids <- c(2393883419, 2398349110, 2393883423, 2393883422, 2393883421, 2393883420, 2393883418, 2393883417, 2393883416, 2393883415, 2393883411, 2393883410, 2393883409, 2393883408, 2393883407, 2393883405, 2393883404, 2393883403, 2393883402, 2393883401, 2393883400, 2393883399, 2393883398) results.minus.gold <- results.minus.gold[which(!(results.minus.gold$X_unit_id %in% disabled_ids)),] results.grouped <- results.minus.gold %>% group_by(predicate, p_label) %>% summarise(judgements = n(), yes = sum(does_the_relation_give_a_count_of_unique_entities == 'yes'), maybe_yes = sum(does_the_relation_give_a_count_of_unique_entities == 'maybe_yes'), maybe_no = sum(does_the_relation_give_a_count_of_unique_entities == 'maybe_no'), no = sum(does_the_relation_give_a_count_of_unique_entities == 'no'), do_not_know = sum(does_the_relation_give_a_count_of_unique_entities == 'do_not_know')) fb_p_labels <- read.csv('counting/p_labels_fb.csv') fb_p_labels[] <- lapply(fb_p_labels, function(x) if (is.factor(x)) as.character(x) else {x}) results.grouped[] <- lapply(results.grouped, function(x) if (is.factor(x)) as.character(x) else {x}) results.grouped$p_label[results.grouped$predicate %in% fb_p_labels$predicate] <- inner_join(results.grouped[results.grouped$predicate %in% fb_p_labels$predicate,-c(2)], fb_p_labels[fb_p_labels$predicate %in% results.grouped$predicate,], by='predicate')$p_label results.grouped$final <- ifelse((results.grouped$yes+results.grouped$maybe_yes > results.grouped$no+results.grouped$maybe_no),1, ifelse((results.grouped$no+results.grouped$maybe_no > results.grouped$yes+results.grouped$maybe_yes), 0,0.5)) write.csv(results.grouped, 'counting/figure-eight/f1411878_grouped.csv', row.names = F) ############### 
Enumerating results <- read.csv('enumerating/figure-eight/f1413990.csv') results.minus.gold <- results[which(results$X_golden == 'false'), ] disabled_ids <- c(2401090584, 2401090602, 2401090608, 2401090611, 2401090613) results.minus.gold <- results.minus.gold[which(!(results.minus.gold$X_unit_id %in% disabled_ids)),] results.grouped <- results.minus.gold %>% group_by(predicate, p_label) %>% summarise(judgements = n(), yes = sum(does_the_relation_enumerate_entities == 'yes'), maybe_yes = sum(does_the_relation_enumerate_entities == 'maybe_yes'), maybe_no = sum(does_the_relation_enumerate_entities == 'maybe_no'), no = sum(does_the_relation_enumerate_entities == 'no'), do_not_know = sum(does_the_relation_enumerate_entities == 'do_not_know')) results.grouped$final <- ifelse((results.grouped$yes+results.grouped$maybe_yes > results.grouped$no+results.grouped$maybe_no),1, ifelse((results.grouped$no+results.grouped$maybe_no > results.grouped$yes+results.grouped$maybe_yes), 0,0.5)) write.csv(results.grouped, 'enumerating/figure-eight/f1413990_grouped.csv', row.names = F)
library(xlsx) df<- read.xlsx("/home/shauny/Desktop/yeildhort2032015/FRUITS/BANANA.xlsx",sheetIndex=1, header=TRUE) df colnames(df)[colnames(df) == 'STATES.Uts.'] <- 'STATES' colnames(df)[colnames(df) == 'X2011.12'] <- '2011-2012(A)' colnames(df)[colnames(df) == 'NA.'] <- '2011-2012(P)' colnames(df)[colnames(df) == 'X2012.13'] <- '2012-2013(A)' colnames(df)[colnames(df) == 'NA..1'] <- '2012-2013(P)' colnames(df)[colnames(df) == 'X2013.14'] <- '2013-2014(A)' colnames(df)[colnames(df) == 'NA..2'] <- '2013-2014(P)' colnames(df)[colnames(df) == 'X2011.12.1'] <- '2011-2012(Yield_Hort(P))' colnames(df)[colnames(df) == 'X2012.13.1'] <- '2012-2013(Yield_Hort(P))' colnames(df)[colnames(df) == 'X2013.14.1'] <- '2013-2014(Yield_Hort(P))' colnames(df)[colnames(df) == 'X..Change.in.2012.13.Over.2011.12'] <- '% change in 2012-2013 over 2011-2012(A)' colnames(df)[colnames(df) == 'NA..3'] <- '% change in 2012-2013 over 2011-2012(P)' colnames(df)[colnames(df) == 'X..Change.in.2013.14.Over.2012.13'] <- '% change in 2013-2014 over 2012-2013(A)' colnames(df)[colnames(df) == 'NA..4'] <- '% change in 2013-2014 over 2012-2013(P)' rownames(df) <- 1:nrow(df) df[is.na(df)] <- 0 df library(plotly) #..............BANANA PRODUCTION IN 2011-2012........................................................................................................... plot_ly(df, x = ~`STATES`, y = ~`2011-2012(P)`, type = 'scatter', mode = 'markers', size = ~`2011-2012(A)`, color = ~`STATES`, colors = 'Paired', marker = list(opacity = 1.5, sizemode = 'diameter', #Controlling for the size of the bubbles: sizeref = 1.5)) %>% layout(title = 'Production of BANANA in 2011-2012', xaxis = list(showgrid = FALSE), yaxis = list(showgrid = FALSE), showlegend = FALSE) .............................................................................................................................................................. 
#...............................BANANA PRODUCTION IN 2012-2013................................................................................................................... plot_ly(df, x = ~`STATES`, y = ~`2012-2013(P)`, type = 'scatter', mode = 'markers', size = ~`2012-2013(A)`, color = ~`STATES`, colors = 'Paired', marker = list(opacity = 1.5, sizemode = 'diameter', #Controlling for the size of the bubbles: sizeref = 1.5)) %>% layout(title = '2012-2013 BANANA Production', xaxis = list(showgrid = FALSE), yaxis = list(showgrid = FALSE), showlegend = FALSE) ............................................................................................................................................................ #.............................BANANA PRODUCTION IN 2013-2014............................................................................................................... plot_ly(df, x = ~`STATES`, y = ~`2013-2014(P)`, type = 'scatter', mode = 'markers', size = ~`2013-2014(A)`, color = ~`STATES`, colors = 'Paired', marker = list(opacity = 1.5, sizemode = 'diameter', #Controlling for the size of the bubbles: sizeref = 1.5)) %>% layout(title = 'Production of BANANA in 2013-2014', xaxis = list(showgrid = FALSE), yaxis = list(showgrid = FALSE), showlegend = FALSE) ............................................................................................................................................................. #..........BANANA YIELD PRODUCTION IN 2011-2012...................................................................................................... 
df$`2011-2012(Yield_Hort(P))` <- as.numeric(as.character(df$`2011-2012(Yield_Hort(P))`)) plot_ly(df, x = ~`STATES`, y = ~`2011-2012(Yield_Hort(P))`, type = 'scatter', mode = 'markers', size = ~`2011-2012(Yield_Hort(P))`, color = ~`STATES`, colors = 'Paired', marker = list(opacity = 1.5, sizemode = 'diameter', #Controllig for the size of the bubbles: sizeref = 1.5)) %>% layout(title = 'Yield Production of BANANA in 2013-2014', xaxis = list(showgrid = FALSE), yaxis = list(showgrid = FALSE), showlegend = FALSE) ............................................................................................................................................................. #............BANANA YIELD PRODUCTION IN 2012-2013....................................................................................................... df$`2012-2013(Yield_Hort(P))` <- as.numeric(as.character(df$`2012-2013(Yield_Hort(P))`)) plot_ly(df, x = ~`STATES`, y = ~`2012-2013(Yield_Hort(P))`, type = 'scatter', mode = 'markers', size = ~`2012-2013(Yield_Hort(P))`, color = ~`STATES`, colors = 'Paired', marker = list(opacity = 1.5, sizemode = 'diameter', #Controlling for the size of the bubbles: sizeref = 1.5)) %>% layout(title = '2012-2013 BANANA Yield Production', xaxis = list(showgrid = FALSE), yaxis = list(showgrid = FALSE), showlegend = FALSE) ............................................................................................................................................................. #..........BANANA YIELD PRODUCTION IN 2013-2014....................................................................................................... 
df$`2013-2014(Yield_Hort(P))` <- as.numeric(as.character(df$`2013-2014(Yield_Hort(P))`)) plot_ly(df, x = ~`STATES`, y = ~`2013-2014(Yield_Hort(P))`, type = 'scatter', mode = 'markers', size = ~`2013-2014(A)`, color = ~`STATES`, colors = 'Paired', marker = list(opacity = 1.5, sizemode = 'diameter', #Controlling for the size of the bubbles: sizeref = 1.5)) %>% layout(title = '2013-2014 BANANA Production', xaxis = list(showgrid = FALSE), yaxis = list(showgrid = FALSE), showlegend = FALSE) ............................................................................................................................................................ #........BANANA PRODUCTION % change in 2012-2013 over 2011-2012......................................................................................................... df$`% change in 2012-2013 over 2011-2012(A)` <- as.numeric(as.character(df$`% change in 2012-2013 over 2011-2012(A)`)) df$`% change in 2012-2013 over 2011-2012(P)` <- as.numeric(as.character(df$`% change in 2012-2013 over 2011-2012(P)`)) plot_ly(df, x = ~`STATES`, y = ~`% change in 2012-2013 over 2011-2012(P)`, type = 'scatter', mode = 'markers', size = ~`% change in 2012-2013 over 2011-2012(P)`, color = ~`STATES`, colors = 'Paired', marker = list(opacity = 0.5, sizemode = 'diameter', #Controlling for the size of the bubbles: sizeref = 1.5)) %>% layout(title = 'BANANA Production % change in 2012-2013 over 2011-2012', xaxis = list(showgrid = FALSE), yaxis = list(showgrid = FALSE), showlegend = FALSE) ............................................................................................................................................................ #......BANANA PRODUCTION CHANGE % CHANGE IN 2013-2014 OVER 2012-2013............................................................................................................. 
df$`% change in 2013-2014 over 2012-2013(A)` <- as.numeric(as.character(df$`% change in 2013-2014 over 2012-2013(A)`)) df$`% change in 2013-2014 over 2012-2013(P)` <- as.numeric(as.character(df$`% change in 2013-2014 over 2012-2013(P)`)) plot_ly(df, x = ~`STATES`, y = ~`% change in 2013-2014 over 2012-2013(P)`, type = 'scatter', mode = 'markers', size = ~`% change in 2013-2014 over 2012-2013(P)` , color = ~`STATES`, colors = 'Paired', marker = list(opacity = 1.5, sizemode = 'diameter', #Controlling for the size of the bubbles: sizeref = 1.5)) %>% layout(title = 'BANANA Production % change in 2013-2014 over 2012-2013' , xaxis = list(showgrid = FALSE), yaxis = list(showgrid = FALSE), showlegend = FALSE) ............................................................................................................................................................
/Banana.R
no_license
ShaunyMachado/Horticulture-Data-Visualisation
R
false
false
8,682
r
library(xlsx) df<- read.xlsx("/home/shauny/Desktop/yeildhort2032015/FRUITS/BANANA.xlsx",sheetIndex=1, header=TRUE) df colnames(df)[colnames(df) == 'STATES.Uts.'] <- 'STATES' colnames(df)[colnames(df) == 'X2011.12'] <- '2011-2012(A)' colnames(df)[colnames(df) == 'NA.'] <- '2011-2012(P)' colnames(df)[colnames(df) == 'X2012.13'] <- '2012-2013(A)' colnames(df)[colnames(df) == 'NA..1'] <- '2012-2013(P)' colnames(df)[colnames(df) == 'X2013.14'] <- '2013-2014(A)' colnames(df)[colnames(df) == 'NA..2'] <- '2013-2014(P)' colnames(df)[colnames(df) == 'X2011.12.1'] <- '2011-2012(Yield_Hort(P))' colnames(df)[colnames(df) == 'X2012.13.1'] <- '2012-2013(Yield_Hort(P))' colnames(df)[colnames(df) == 'X2013.14.1'] <- '2013-2014(Yield_Hort(P))' colnames(df)[colnames(df) == 'X..Change.in.2012.13.Over.2011.12'] <- '% change in 2012-2013 over 2011-2012(A)' colnames(df)[colnames(df) == 'NA..3'] <- '% change in 2012-2013 over 2011-2012(P)' colnames(df)[colnames(df) == 'X..Change.in.2013.14.Over.2012.13'] <- '% change in 2013-2014 over 2012-2013(A)' colnames(df)[colnames(df) == 'NA..4'] <- '% change in 2013-2014 over 2012-2013(P)' rownames(df) <- 1:nrow(df) df[is.na(df)] <- 0 df library(plotly) #..............BANANA PRODUCTION IN 2011-2012........................................................................................................... plot_ly(df, x = ~`STATES`, y = ~`2011-2012(P)`, type = 'scatter', mode = 'markers', size = ~`2011-2012(A)`, color = ~`STATES`, colors = 'Paired', marker = list(opacity = 1.5, sizemode = 'diameter', #Controlling for the size of the bubbles: sizeref = 1.5)) %>% layout(title = 'Production of BANANA in 2011-2012', xaxis = list(showgrid = FALSE), yaxis = list(showgrid = FALSE), showlegend = FALSE) .............................................................................................................................................................. 
#...............................BANANA PRODUCTION IN 2012-2013................................................................................................................... plot_ly(df, x = ~`STATES`, y = ~`2012-2013(P)`, type = 'scatter', mode = 'markers', size = ~`2012-2013(A)`, color = ~`STATES`, colors = 'Paired', marker = list(opacity = 1.5, sizemode = 'diameter', #Controlling for the size of the bubbles: sizeref = 1.5)) %>% layout(title = '2012-2013 BANANA Production', xaxis = list(showgrid = FALSE), yaxis = list(showgrid = FALSE), showlegend = FALSE) ............................................................................................................................................................ #.............................BANANA PRODUCTION IN 2013-2014............................................................................................................... plot_ly(df, x = ~`STATES`, y = ~`2013-2014(P)`, type = 'scatter', mode = 'markers', size = ~`2013-2014(A)`, color = ~`STATES`, colors = 'Paired', marker = list(opacity = 1.5, sizemode = 'diameter', #Controlling for the size of the bubbles: sizeref = 1.5)) %>% layout(title = 'Production of BANANA in 2013-2014', xaxis = list(showgrid = FALSE), yaxis = list(showgrid = FALSE), showlegend = FALSE) ............................................................................................................................................................. #..........BANANA YIELD PRODUCTION IN 2011-2012...................................................................................................... 
df$`2011-2012(Yield_Hort(P))` <- as.numeric(as.character(df$`2011-2012(Yield_Hort(P))`)) plot_ly(df, x = ~`STATES`, y = ~`2011-2012(Yield_Hort(P))`, type = 'scatter', mode = 'markers', size = ~`2011-2012(Yield_Hort(P))`, color = ~`STATES`, colors = 'Paired', marker = list(opacity = 1.5, sizemode = 'diameter', #Controllig for the size of the bubbles: sizeref = 1.5)) %>% layout(title = 'Yield Production of BANANA in 2013-2014', xaxis = list(showgrid = FALSE), yaxis = list(showgrid = FALSE), showlegend = FALSE) ............................................................................................................................................................. #............BANANA YIELD PRODUCTION IN 2012-2013....................................................................................................... df$`2012-2013(Yield_Hort(P))` <- as.numeric(as.character(df$`2012-2013(Yield_Hort(P))`)) plot_ly(df, x = ~`STATES`, y = ~`2012-2013(Yield_Hort(P))`, type = 'scatter', mode = 'markers', size = ~`2012-2013(Yield_Hort(P))`, color = ~`STATES`, colors = 'Paired', marker = list(opacity = 1.5, sizemode = 'diameter', #Controlling for the size of the bubbles: sizeref = 1.5)) %>% layout(title = '2012-2013 BANANA Yield Production', xaxis = list(showgrid = FALSE), yaxis = list(showgrid = FALSE), showlegend = FALSE) ............................................................................................................................................................. #..........BANANA YIELD PRODUCTION IN 2013-2014....................................................................................................... 
df$`2013-2014(Yield_Hort(P))` <- as.numeric(as.character(df$`2013-2014(Yield_Hort(P))`)) plot_ly(df, x = ~`STATES`, y = ~`2013-2014(Yield_Hort(P))`, type = 'scatter', mode = 'markers', size = ~`2013-2014(A)`, color = ~`STATES`, colors = 'Paired', marker = list(opacity = 1.5, sizemode = 'diameter', #Controlling for the size of the bubbles: sizeref = 1.5)) %>% layout(title = '2013-2014 BANANA Production', xaxis = list(showgrid = FALSE), yaxis = list(showgrid = FALSE), showlegend = FALSE) ............................................................................................................................................................ #........BANANA PRODUCTION % change in 2012-2013 over 2011-2012......................................................................................................... df$`% change in 2012-2013 over 2011-2012(A)` <- as.numeric(as.character(df$`% change in 2012-2013 over 2011-2012(A)`)) df$`% change in 2012-2013 over 2011-2012(P)` <- as.numeric(as.character(df$`% change in 2012-2013 over 2011-2012(P)`)) plot_ly(df, x = ~`STATES`, y = ~`% change in 2012-2013 over 2011-2012(P)`, type = 'scatter', mode = 'markers', size = ~`% change in 2012-2013 over 2011-2012(P)`, color = ~`STATES`, colors = 'Paired', marker = list(opacity = 0.5, sizemode = 'diameter', #Controlling for the size of the bubbles: sizeref = 1.5)) %>% layout(title = 'BANANA Production % change in 2012-2013 over 2011-2012', xaxis = list(showgrid = FALSE), yaxis = list(showgrid = FALSE), showlegend = FALSE) ............................................................................................................................................................ #......BANANA PRODUCTION CHANGE % CHANGE IN 2013-2014 OVER 2012-2013............................................................................................................. 
df$`% change in 2013-2014 over 2012-2013(A)` <- as.numeric(as.character(df$`% change in 2013-2014 over 2012-2013(A)`)) df$`% change in 2013-2014 over 2012-2013(P)` <- as.numeric(as.character(df$`% change in 2013-2014 over 2012-2013(P)`)) plot_ly(df, x = ~`STATES`, y = ~`% change in 2013-2014 over 2012-2013(P)`, type = 'scatter', mode = 'markers', size = ~`% change in 2013-2014 over 2012-2013(P)` , color = ~`STATES`, colors = 'Paired', marker = list(opacity = 1.5, sizemode = 'diameter', #Controlling for the size of the bubbles: sizeref = 1.5)) %>% layout(title = 'BANANA Production % change in 2013-2014 over 2012-2013' , xaxis = list(showgrid = FALSE), yaxis = list(showgrid = FALSE), showlegend = FALSE) ............................................................................................................................................................
source("hw0.R") intro_to_r(6) intro_to_plotting(6)
/HW0_refactored/test.R
no_license
nasifimtiazohi/ALDA_HW
R
false
false
55
r
source("hw0.R") intro_to_r(6) intro_to_plotting(6)
# Null model for de novo regulatory mutations # Probability of mutation in middle base of each trinucleotide is calculated from empirical data. # The probabilities are assumed poisson, independent and summed across each sequence mut_rates <- read.table("../data/forSanger_1KG_mutation_rate_table.txt", header=TRUE) ### SNP MODEL - based on sequence context #### # the indel null model is inferred directly from global snp mutation rate and data - see rupit_core.R for generate_indel_null function p_base <- function(from){ # probability of mutation from base in position 2 to any of three other possible bases p = mut_rates$mu_snp[c(mut_rates$from == from)] return(sum(p)) } p_position <- function(sequence, normalize = FALSE){ # return vector length nchar(sequence) - 2 with probability of mutation at each base sequence = as.character(sequence) p = sapply(seq(1:nchar(sequence)-2), function(z) p_base(substr(sequence, z, z+2))) if (normalize == FALSE){ return(p) } else { return(p/sum(p)) } } p_sequence <- function(sequence){ # sum p_all across each trinucleotide sliding along sequence p = p_position(sequence) return(sum(as.numeric(p))) }
/R/regmut_null_model.R
no_license
pjshort/rupit
R
false
false
1,204
r
# Null model for de novo regulatory mutations # Probability of mutation in middle base of each trinucleotide is calculated from empirical data. # The probabilities are assumed poisson, independent and summed across each sequence mut_rates <- read.table("../data/forSanger_1KG_mutation_rate_table.txt", header=TRUE) ### SNP MODEL - based on sequence context #### # the indel null model is inferred directly from global snp mutation rate and data - see rupit_core.R for generate_indel_null function p_base <- function(from){ # probability of mutation from base in position 2 to any of three other possible bases p = mut_rates$mu_snp[c(mut_rates$from == from)] return(sum(p)) } p_position <- function(sequence, normalize = FALSE){ # return vector length nchar(sequence) - 2 with probability of mutation at each base sequence = as.character(sequence) p = sapply(seq(1:nchar(sequence)-2), function(z) p_base(substr(sequence, z, z+2))) if (normalize == FALSE){ return(p) } else { return(p/sum(p)) } } p_sequence <- function(sequence){ # sum p_all across each trinucleotide sliding along sequence p = p_position(sequence) return(sum(as.numeric(p))) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/item_desc.R \name{crt_iteman} \alias{crt_iteman} \title{Calculate criterion-referenced item discrimination indices} \usage{ crt_iteman(data, items, cut_score, scale = "raw") } \arguments{ \item{data}{A data frame of dichotomously scored test times} \item{items}{Raw column indices representing the test items} \item{cut_score}{A raw or percentage cut-score} \item{scale}{A character vector indicating whether the cut-score is 'raw' (default) or 'percent'} } \value{ \code{if_pass} contains item facility values for test items for students who passed the test \code{if_fail} contains item facility values for test items for students who did not pass the test \code{if_total} contains item facility values for test items \code{b_index} contains b-index values for items on the test \code{agree_stat} contains agreement statistic values for items on the test \code{item_phi} contains item phi values for items on the test } \description{ Calculate criterion-referenced item discrimination indices } \examples{ crt_iteman(bh_depend, 2:31, 21, scale = 'raw') }
/man/crt_iteman.Rd
no_license
gtlaflair/rcrtan
R
false
true
1,143
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/item_desc.R \name{crt_iteman} \alias{crt_iteman} \title{Calculate criterion-referenced item discrimination indices} \usage{ crt_iteman(data, items, cut_score, scale = "raw") } \arguments{ \item{data}{A data frame of dichotomously scored test times} \item{items}{Raw column indices representing the test items} \item{cut_score}{A raw or percentage cut-score} \item{scale}{A character vector indicating whether the cut-score is 'raw' (default) or 'percent'} } \value{ \code{if_pass} contains item facility values for test items for students who passed the test \code{if_fail} contains item facility values for test items for students who did not pass the test \code{if_total} contains item facility values for test items \code{b_index} contains b-index values for items on the test \code{agree_stat} contains agreement statistic values for items on the test \code{item_phi} contains item phi values for items on the test } \description{ Calculate criterion-referenced item discrimination indices } \examples{ crt_iteman(bh_depend, 2:31, 21, scale = 'raw') }
tmAggregate <- function(dtfDT, indexList, type, ascending, drop.unused.levels, fun.aggregate, args) { l <- s <- i <- k <- n <- NULL depth <- length(indexList) dats <- list() for (d in 1:depth) { datd <- tmAggregateStep(dtfDT, indexList[1:d], fun.aggregate, args) if (d < depth) { indexPlus <- indexList[(d+1):depth] datd[, get("indexPlus"):=lapply(indexPlus, function(x)factor(NA, levels=levels(dtfDT[[x]])))] setcolorder(datd, c(indexList, "s", "c", "i")) } datd[, l:=d] dats[[d]] <- datd } datlist <- rbindlist(dats) datlist <- datlist[!is.na(datlist$index1), ] datlist <- datlist[!is.na(datlist$s), ] if (min(datlist$s) < 0) stop("vSize contains negative values.") datlist <- datlist[datlist$s>0,] if (drop.unused.levels && is.factor(datlist$c)) datlist$c <- datlist$c[, drop=TRUE] if (type=="dens") { # datlist[, c:=c/s] datlist[is.nan(datlist$c), c:=0] } if (!ascending) { datlist[, i:=-i] } # add unqiue key (k) datlist[, k:=as.factor(do.call("paste", c(as.list(datlist[, c(indexList, "l"), with=FALSE]), sep="__")))] setkey(datlist, k) # add label name (n) datlist[, n:=apply(datlist, MARGIN=1, FUN=function(x) x[as.integer(x["l"])])] datlist[, n:=ifelse(is.na(n), "", n)] datlist } tmAggregateStep <- function(dtfDT, indexList, fun.aggregate, args) { .SD <- s <- i <- NULL fun <- match.fun(fun.aggregate) isCat <- !is.numeric(dtfDT$c) ## aggregate numeric variable fn <- function(x) { if (is.numeric(x)) { sum(x, na.rm=TRUE) } else { which.max(table(x)) } } if (fun.aggregate=="weighted.mean") { dat <- dtfDT[ , list(s=fn(s), c=do.call("fun", c(list(c, w), args)), i=fn(i)), by=indexList] } else { dat <- dtfDT[ , list(s=fn(s), c=do.call("fun", c(list(c), args)), i=fn(i)), by=indexList] } ## aggregate categorical variables: for each aggregate, get the mode if (isCat) { #fact <- factor(datCat$c, levels=1:nlevels(dtfDT$c), labels=levels(dtfDT$c)) dat[, c:=factor(c, levels=1:nlevels(dtfDT$c), labels=levels(dtfDT$c))] } dat }
/pkg/R/tmAggregate.R
no_license
chintasunny/treemap
R
false
false
2,356
r
tmAggregate <- function(dtfDT, indexList, type, ascending, drop.unused.levels, fun.aggregate, args) { l <- s <- i <- k <- n <- NULL depth <- length(indexList) dats <- list() for (d in 1:depth) { datd <- tmAggregateStep(dtfDT, indexList[1:d], fun.aggregate, args) if (d < depth) { indexPlus <- indexList[(d+1):depth] datd[, get("indexPlus"):=lapply(indexPlus, function(x)factor(NA, levels=levels(dtfDT[[x]])))] setcolorder(datd, c(indexList, "s", "c", "i")) } datd[, l:=d] dats[[d]] <- datd } datlist <- rbindlist(dats) datlist <- datlist[!is.na(datlist$index1), ] datlist <- datlist[!is.na(datlist$s), ] if (min(datlist$s) < 0) stop("vSize contains negative values.") datlist <- datlist[datlist$s>0,] if (drop.unused.levels && is.factor(datlist$c)) datlist$c <- datlist$c[, drop=TRUE] if (type=="dens") { # datlist[, c:=c/s] datlist[is.nan(datlist$c), c:=0] } if (!ascending) { datlist[, i:=-i] } # add unqiue key (k) datlist[, k:=as.factor(do.call("paste", c(as.list(datlist[, c(indexList, "l"), with=FALSE]), sep="__")))] setkey(datlist, k) # add label name (n) datlist[, n:=apply(datlist, MARGIN=1, FUN=function(x) x[as.integer(x["l"])])] datlist[, n:=ifelse(is.na(n), "", n)] datlist } tmAggregateStep <- function(dtfDT, indexList, fun.aggregate, args) { .SD <- s <- i <- NULL fun <- match.fun(fun.aggregate) isCat <- !is.numeric(dtfDT$c) ## aggregate numeric variable fn <- function(x) { if (is.numeric(x)) { sum(x, na.rm=TRUE) } else { which.max(table(x)) } } if (fun.aggregate=="weighted.mean") { dat <- dtfDT[ , list(s=fn(s), c=do.call("fun", c(list(c, w), args)), i=fn(i)), by=indexList] } else { dat <- dtfDT[ , list(s=fn(s), c=do.call("fun", c(list(c), args)), i=fn(i)), by=indexList] } ## aggregate categorical variables: for each aggregate, get the mode if (isCat) { #fact <- factor(datCat$c, levels=1:nlevels(dtfDT$c), labels=levels(dtfDT$c)) dat[, c:=factor(c, levels=1:nlevels(dtfDT$c), labels=levels(dtfDT$c))] } dat }
# Code for processing TUME (ESALQ/USP)
# Authors: Gorgens (gorgens at usp.br); Andre Gracioso Peres da Silva (andregracioso@gmail.com)
###############################################################################
# version 1.01 (dez/2016)

### -------------------------------------------------------------------------------
### Helper functions

# For one species, fit a hypsometric model log(H) ~ 1/DAP on the trees that
# have a measured height, then return a height vector for EVERY tree in
# tume.esp: the measured height where available, otherwise the model estimate
# multiplied by the Meyer correction factor (log back-transformation bias).
hipsometrica <- function(tume.esp){
  tume.temp = tume.esp
  # Treat missing condition codes as 0 so the filters below never yield NA.
  for (i in seq_along(tume.temp$Cod)) {
    if (is.na(tume.temp$Cod[i])){
      tume.temp$Cod[i] = 0
    }
  }
  # FIX: the original filter 'Cod != c(4,5,7)' recycled the comparison vector
  # and only excluded each code on every third row; %in% excludes codes
  # 4, 5 and 7 everywhere. 'H_m != "NA"' replaced by the explicit NA test.
  uteis = subset(tume.temp, !is.na(tume.temp$H_m) & !(tume.temp$Cod %in% c(4, 5, 7)))
  logH = log(uteis$H_m)
  invD = 1/uteis$DAP_cm
  modelo = lm(logH ~ invD)
  # predict() looks the 'invD' regressor up in newdata, so add it per tree.
  tume.esp = cbind(tume.esp, invD = 1/tume.esp$DAP_cm)
  # Meyer correction factor for the exp() back-transformation.
  correcao = sum(exp(modelo$residuals))/length(modelo$residuals)
  estH = numeric(nrow(tume.esp))  # preallocate instead of growing from a scalar
  for (i in seq_len(nrow(tume.esp))){
    if (!is.na(tume.esp$DAP_cm[i]) && is.na(tume.esp$H_m[i])) {
      # DAP measured but height missing: estimate it from the model.
      estH[i] = exp(predict(modelo, newdata = tume.esp[i, ])) * correcao
    } else {
      estH[i] = tume.esp$H_m[i]
    }
  }
  estH
  # (the original had an unreachable rm(modelo) after return(); removed)
}

# Plot-level stem volume (m3/ha): individual-tree volumes from DAP (cm),
# total height (m) and a fixed form factor of 0.5, summed and scaled from
# the plot area Parc_m2 (m2) to one hectare.
parcVolume <- function(tume.esp){
  tempV = na.omit(cbind(tume.esp$DAP_cm, tume.esp$estH_m))
  sum((tempV[,1]^2*pi/40000) * tempV[,2] * 0.5) * 10000 / tume.esp$Parc_m2[1]
}

# Empty summary row, used when a species has too few valid measurements.
linha_vazia <- function(tume.esp){
  data.frame(N_tume = tume.esp$N_tume[1],
             Esp = as.character(tume.esp$Esp[1]),
             I_meses = 0, Parc_m2 = 0, DAPmed = 0, DAPsd = 0,
             Hmed = 0, Hsd = 0, Hdom = 0, N_fuste = 0,
             Sobr = 0, G = 0, V = 0, IMA = 0, B = 0)
}

# Per-species summary table for trials older than 23 months WITHOUT thinning.
# tume.esp: measurements of a single species; estH_m: heights returned by
# hipsometrica() for the same rows. Reads the file-level ESP.DENSIDADE table.
resumo_pos24 <- function(tume.esp, estH_m){
  resumo_pos <- linha_vazia(tume.esp)
  tume.esp = tume.esp[, -9]  # drop column 9 (Cod2) -- positional; TODO confirm layout
  # FIX: use the estH_m argument; the original read the global 'estH' and
  # ignored its own parameter, working only by accident.
  tume.esp = cbind(tume.esp, estH_m = estH_m)
  # FIX: species name taken from the data instead of the loop global 'e'.
  esp = as.character(tume.esp$Esp[1])
  resumo_pos$I_meses = tume.esp$I_meses[1]
  resumo_pos$Parc_m2 = round(tume.esp$Parc_m2[1], 1)
  resumo_pos$DAPmed = round(mean(na.omit(tume.esp$DAP_cm)), 1)
  resumo_pos$DAPsd = round(sd(na.omit(tume.esp$DAP_cm)), 1)
  resumo_pos$Hmed = round(mean(na.omit(tume.esp$H_m)), 1)
  resumo_pos$Hsd = round(sd(na.omit(tume.esp$H_m)), 1)
  # Dominant height: mean height of trees flagged with code 6.
  resumo_pos$Hdom = round(mean(na.omit(tume.esp[tume.esp$Cod == 6, names(tume.esp) %in% c("H_m")])), 1)
  # Stems per hectare.
  resumo_pos$N_fuste = round(length(na.omit(tume.esp$DAP_cm)) * 10000 / tume.esp$Parc_m2[1], 0)
  # Survival (%): codes 1 and 5 are treated as dead/failed trees.
  resumo_pos$Sobr = round(((1 - ((nrow(tume.esp) - nrow(tume.esp[tume.esp$Cod != 1 & tume.esp$Cod != 5,])) / max(tume.esp$N_arv))) * 100), 1)
  # Basal area (m2/ha).
  resumo_pos$G = round(sum(na.omit(tume.esp$DAP_cm)^2 * pi /40000) * 10000 / tume.esp$Parc_m2[1], 1)
  resumo_pos$V = round(parcVolume(tume.esp), 0)
  # Mean annual increment (m3/ha/year).
  resumo_pos$IMA = round(resumo_pos$V[1] / (tume.esp$I_meses[1]/12), 1)
  if (esp %in% ESP.DENSIDADE$Esp){
    # Biomass = volume times the species' basic density (column 2).
    resumo_pos$B = round(resumo_pos$V[1] * ESP.DENSIDADE[ESP.DENSIDADE$Esp == esp, 2], 0)
  } else {
    resumo_pos$B = ""
  }
  resumo_pos
}

# Per-species summary table for trials older than 23 months WITH thinning.
# Survival and IMA are not meaningful after thinning and are left blank.
resumo_pos24desb <- function(tume.esp, estH_m){
  resumo_pos <- linha_vazia(tume.esp)
  tume.esp = tume.esp[, -9]  # drop column 9 (Cod2) -- positional; TODO confirm layout
  # FIX: use the estH_m argument instead of the global 'estH' (see resumo_pos24).
  tume.esp = cbind(tume.esp, estH_m = estH_m)
  # FIX: species name taken from the data instead of the loop global 'e'.
  esp = as.character(tume.esp$Esp[1])
  resumo_pos$I_meses = tume.esp$I_meses[1]
  resumo_pos$Parc_m2 = round(tume.esp$Parc_m2[1], 1)
  resumo_pos$DAPmed = round(mean(na.omit(tume.esp$DAP_cm)), 1)
  resumo_pos$DAPsd = round(sd(na.omit(tume.esp$DAP_cm)), 1)
  resumo_pos$Hmed = round(mean(na.omit(tume.esp$H_m)), 1)
  resumo_pos$Hsd = round(sd(na.omit(tume.esp$H_m)), 1)
  resumo_pos$Hdom = round(mean(na.omit(tume.esp[tume.esp$Cod == 6, names(tume.esp) %in% c("H_m")])), 1)
  resumo_pos$N_fuste = round(length(na.omit(tume.esp$DAP_cm)) * 10000 / tume.esp$Parc_m2[1], 0)
  resumo_pos$Sobr = ""
  resumo_pos$G = round(sum(na.omit(tume.esp$DAP_cm)^2 * pi /40000) * 10000 / tume.esp$Parc_m2[1], 1)
  resumo_pos$V = round(parcVolume(tume.esp), 0)
  resumo_pos$IMA = ""
  if (esp %in% ESP.DENSIDADE$Esp){
    resumo_pos$B = round(resumo_pos$V[1] * ESP.DENSIDADE[ESP.DENSIDADE$Esp == esp, 2], 0)
  } else {
    resumo_pos$B = ""
  }
  resumo_pos
}

# Per-species summary table for trials younger than 24 months: only the
# height-based statistics are computed (DAP/volume fields are left blank).
resumo_pre24 <- function(tume.esp){
  resumo_pre <- linha_vazia(tume.esp)
  resumo_pre$I_meses = tume.esp$I_meses[1]
  resumo_pre$Parc_m2 = round(tume.esp$Parc_m2[1], 1)
  resumo_pre$DAPmed = ""
  resumo_pre$DAPsd = ""
  resumo_pre$Hmed = round(mean(na.omit(tume.esp$H_m)), 1)
  resumo_pre$Hsd = round(sd(na.omit(tume.esp$H_m)), 1)
  resumo_pre$Hdom = round(mean(na.omit(tume.esp[tume.esp$Cod == 6, names(tume.esp) %in% c("H_m")])), 1) # Stick 6
  resumo_pre$N_fuste = round(length(na.omit(tume.esp$H_m)) * 10000 / tume.esp$Parc_m2[1], 0)
  resumo_pre$Sobr = round(((1 - ((nrow(tume.esp) - nrow(tume.esp[tume.esp$Cod != 1 & tume.esp$Cod != 5,])) / max(tume.esp$N_arv))) * 100), 1)
  resumo_pre$G = ""
  resumo_pre$V = ""
  resumo_pre$IMA = ""
  resumo_pre$B = ""
  resumo_pre
}

# Bar chart of the volume stock per species, written to <TUME.OUT>/<l>.jpg.
plotVolume <- function(tabela_resumo, l){
  # Sort the species by volume, largest first.
  tabela_resumo = tabela_resumo[with(tabela_resumo, order(-V)), ]
  # Horizontal position of the last x-axis label.
  end_point = 0.5 + nrow(tabela_resumo) + nrow(tabela_resumo)-1
  if (nrow(tabela_resumo) > 17) {
    # Wide trials get a proportionally wider image.
    jpeg(paste(TUME.OUT, sub(".csv","",l,fixed=TRUE), ".jpg", sep=""),
         height = 7, width = nrow(tabela_resumo)*0.6, units = "cm", res = 300)
  } else {
    jpeg(paste(TUME.OUT, sub(".csv","",l,fixed=TRUE), ".jpg", sep=""),
         height = 7, width = 10, units = "cm", res = 300)
  }
  # Extra bottom margin for the rotated x-axis labels.
  par(mar = c(5, 4, 2, 1) + 0.2, mgp = c(2.2,1,0))
  par(family="sans")
  barplot(tabela_resumo$V, col="grey50",
          main=paste("TUME ", tabela_resumo$N_tume[1], " - ", tabela_resumo$I_meses[1], " meses", sep=""),
          cex.main = 0.6, cex.axis = 0.6, cex.lab = 0.6,
          ylab = "Volume (m³/ha)",
          ylim = c(0, 1.1 * max(tabela_resumo$V)),
          xlab = "", space = 1)
  # Rotated (60 degrees) species labels, one under each bar.
  text(seq(1.5, end_point, by=2), par("usr")[3]-2,
       srt = 60, adj = 1, xpd = TRUE,
       labels = tabela_resumo$Esp, cex = 0.6)
  dev.off()
}

# Bar chart of the mean height per species, written to <TUME.OUT>/<l>.jpg.
plotHmed <- function(tabela_resumo, l){
  # Sort the species by mean height, largest first.
  tabela_resumo = tabela_resumo[with(tabela_resumo, order(-Hmed)), ]
  # Horizontal position of the last x-axis label.
  end_point = 0.5 + nrow(tabela_resumo) + nrow(tabela_resumo)-1
  if (nrow(tabela_resumo) > 17) {
    jpeg(paste(TUME.OUT, sub(".csv","",l,fixed=TRUE), ".jpg", sep=""),
         height = 7, width = nrow(tabela_resumo)*0.6, units = "cm", res = 300)
  } else {
    jpeg(paste(TUME.OUT, sub(".csv","",l,fixed=TRUE), ".jpg", sep=""),
         height = 7, width = 10, units = "cm", res = 300)
  }
  # Extra bottom margin for the rotated x-axis labels.
  par(mar = c(5, 4, 2, 1) + 0.2, mgp = c(2.2,1,0))
  par(family="sans")
  barplot(tabela_resumo$Hmed, col="grey50",
          main=paste("TUME ", tabela_resumo$N_tume[1], " - ", tabela_resumo$I_meses[1], " meses", sep=""),
          cex.main=0.6, cex.axis = 0.6, cex.lab = 0.6,
          ylab = "Altura media (m)",
          ylim = c(0, 1.1 * max(tabela_resumo$Hmed)),
          xlab = "", space = 1)
  # Rotated (60 degrees) species labels, one under each bar.
  text(seq(1.5, end_point, by=2), par("usr")[3]-0.1,
       srt = 60, adj = 1, xpd = TRUE,
       labels = tabela_resumo$Esp, cex = 0.6)
  dev.off()
}

### -------------------------------------------------------------------------------
### Global variables

# Folder with measurement files (input)
TUME.PATH <- paste(getwd(), "/input/", sep = "")
# Folder for the output files
TUME.OUT <- paste(getwd(), "/output/", sep = "")
# Folder with reference files (e.g. basic density per genetic material)
TUME.REF <- paste(getwd(), "/referencias/", sep = "")
# Names of the files to process
TUME.FILES <- list.files(TUME.PATH)
# Density table (Densidades.csv): species in column 1, basic density in column 2
ESP.DENSIDADE <- read.csv(paste(TUME.REF, "Densidades.csv", sep=""))

### -------------------------------------------------------------------------------
### Analysis: one summary table (+ chart + CSV) per trial file

for (l in TUME.FILES){
  # Import one trial file
  tume = read.csv(paste(TUME.PATH, l, sep=""), sep=",")
  # Species present in this trial.
  # NOTE(review): levels() assumes Esp was read as a factor (pre-R-4.0
  # stringsAsFactors default); on R >= 4.0 this is NULL -- TODO confirm.
  TUME.ESP <- levels(tume$Esp)
  tabela_resumo = data.frame()
  if (tume$I_meses[1] > 23 && 3 %in% tume$Cod){
    # Older than 23 months WITH thinning (code 3 present).
    for (e in TUME.ESP){
      tume.esp = subset(tume, tume$Esp == e)
      if (nrow(na.omit(tume.esp[,1:6])) > 3 && is.numeric(tume.esp$DAP_cm)){
        estH = hipsometrica(tume.esp)
        tabela_resumo = rbind(tabela_resumo, resumo_pos24desb(tume.esp, estH))
      } else {
        tabela_resumo <- rbind(tabela_resumo, linha_vazia(tume.esp))
      }
    }
    plotVolume(tabela_resumo, l)
    write.csv(tabela_resumo, file = paste(TUME.OUT, "saida_", l, sep = ""), row.names = FALSE)
  } else if (tume$I_meses[1] > 23 && !(3 %in% tume$Cod)){
    # Older than 23 months WITHOUT thinning.
    for (e in TUME.ESP){
      tume.esp = subset(tume, tume$Esp == e)
      if (nrow(na.omit(tume.esp[,1:6])) > 3 && is.numeric(tume.esp$DAP_cm)){
        estH = hipsometrica(tume.esp)
        tabela_resumo = rbind(tabela_resumo, resumo_pos24(tume.esp, estH))
      } else {
        tabela_resumo <- rbind(tabela_resumo, linha_vazia(tume.esp))
      }
    }
    plotVolume(tabela_resumo, l)
    write.csv(tabela_resumo, file = paste(TUME.OUT, "saida_", l, sep = ""), row.names = FALSE)
  } else {
    # Younger than 24 months: height-only summary.
    for (e in TUME.ESP){
      tume.esp = subset(tume, tume$Esp == e)
      if (nrow(na.omit(tume.esp[,c(1:5, 7)])) > 3){
        tabela_resumo = rbind(tabela_resumo, resumo_pre24(tume.esp))
      } else {
        tabela_resumo <- rbind(tabela_resumo, linha_vazia(tume.esp))
      }
    }
    plotHmed(tabela_resumo, l)
    write.csv(tabela_resumo, file = paste(TUME.OUT, "saida_", l, sep = ""), row.names = FALSE)
  }
}
/R_TUME.R
no_license
Gorgens/TUME
R
false
false
14,766
r
# Code for processing TUME (ESALQ/USP)
# Authors: Gorgens (gorgens at usp.br); Andre Gracioso Peres da Silva (andregracioso@gmail.com)
###############################################################################
# version 1.01 (dez/2016)

### -------------------------------------------------------------------------------
### Helper functions

# For one species, fit a hypsometric model log(H) ~ 1/DAP on trees with a
# measured height, then return a height vector for every tree: the measured
# height where available, otherwise the model estimate times the Meyer
# correction factor.
hipsometrica <- function(tume.esp){
  tume.temp = tume.esp
  # Treat missing condition codes as 0 so the filter below never yields NA.
  for (i in seq(1, length(tume.temp$Cod), 1)) {
    if (is.na(tume.temp$Cod[i])){
      tume.temp$Cod[i] = 0
    }
  }
  # NOTE(review): 'Cod != c(4,5,7)' recycles the comparison vector, so each
  # code is only excluded on every third row; the intent was almost certainly
  # '!(Cod %in% c(4,5,7))' -- TODO confirm and fix.
  uteis = subset(tume.temp, tume.temp$H_m != "NA" & tume.temp$Cod != c(4,5,7))
  logH = log(uteis$H_m)
  invD = 1/uteis$DAP_cm
  rm(uteis)
  modelo = lm(logH ~ invD)
  rm(logH, invD)
  # predict() looks the 'invD' regressor up in newdata, so add it per tree.
  tume.esp = cbind(tume.esp, invD = 1/tume.esp$DAP_cm)
  # Meyer correction factor for the exp() back-transformation.
  correcao = sum(exp(modelo$residuals))/length(modelo$residuals)
  estH = 0
  for (i in seq(1,nrow(tume.esp))){
    if (is.na(tume.esp$DAP_cm[i]) == FALSE & is.na(tume.esp$H_m[i]) == TRUE) {
      # DAP measured but height missing: estimate it from the model.
      estH[i] = exp(predict(modelo, new = tume.esp[i,])) * correcao
    } else {
      estH[i] = tume.esp$H_m[i]
    }
  }
  return(estH)
  # NOTE(review): unreachable -- placed after return().
  rm(modelo)
}

# Plot-level stem volume (m3/ha) from DAP (cm), estimated height (m) and a
# fixed form factor of 0.5, scaled from the plot area Parc_m2 to one hectare.
parcVolume <- function(tume.esp){
  tempV = na.omit(cbind(tume.esp$DAP_cm, tume.esp$estH_m))
  volume = sum((tempV[,1]^2*pi/40000) * tempV[,2] * 0.5) * 10000 / tume.esp$Parc_m2[1]
  rm(tempV)
  return(volume)
}

# Per-species summary for trials older than 23 months WITHOUT thinning.
# NOTE(review): the estH_m parameter is never used -- the body reads the
# global 'estH' set by the main loop; it also reads the loop global 'e' and
# the file-level ESP.DENSIDADE table.
resumo_pos24 <- function(tume.esp, estH_m){
  resumo_pos <- data.frame(N_tume = tume.esp$N_tume[1],
                           Esp = as.character(tume.esp$Esp[1]),
                           I_meses = 0, Parc_m2 = 0, DAPmed = 0, DAPsd = 0,
                           Hmed = 0, Hsd = 0, Hdom = 0, N_fuste = 0,
                           Sobr = 0, G = 0, V = 0, IMA = 0, B = 0)
  tume.esp = tume.esp[, -9] # drop Cod2 (positional -- TODO confirm layout)
  tume.esp = cbind(tume.esp, estH_m = estH)
  resumo_pos$I_meses = tume.esp$I_meses[1]
  resumo_pos$Parc_m2 = round(tume.esp$Parc_m2[1], 1)
  resumo_pos$DAPmed = round(mean(na.omit(tume.esp$DAP_cm)), 1)
  resumo_pos$DAPsd = round(sd(na.omit(tume.esp$DAP_cm)), 1)
  resumo_pos$Hmed = round(mean(na.omit(tume.esp$H_m)), 1)
  resumo_pos$Hsd = round(sd(na.omit(tume.esp$H_m)), 1)
  # Dominant height: mean height of trees flagged with code 6.
  resumo_pos$Hdom = round(mean(na.omit(tume.esp[tume.esp$Cod == 6, names(tume.esp) %in% c("H_m")])), 1)
  # Stems per hectare.
  resumo_pos$N_fuste = round(length(na.omit(tume.esp$DAP_cm)) * 10000 / tume.esp$Parc_m2[1], 0)
  # Survival (%): codes 1 and 5 counted as dead/failed trees.
  resumo_pos$Sobr = round(((1 - ((nrow(tume.esp) - nrow(tume.esp[tume.esp$Cod != 1 & tume.esp$Cod != 5,])) / max(tume.esp$N_arv))) * 100), 1)
  # Basal area (m2/ha).
  resumo_pos$G = round(sum(na.omit(tume.esp$DAP_cm)^2 * pi /40000) * 10000 / tume.esp$Parc_m2[1], 1)
  resumo_pos$V = round(parcVolume(tume.esp), 0)
  # Mean annual increment (m3/ha/year).
  resumo_pos$IMA = round(resumo_pos$V[1] / (tume.esp$I_meses[1]/12), 1)
  if (e %in% ESP.DENSIDADE$Esp){
    # Biomass = volume times basic density (column 2 of ESP.DENSIDADE).
    resumo_pos$B = round(resumo_pos$V[1] * ESP.DENSIDADE[ESP.DENSIDADE$Esp == e, 2],0)
  } else {
    resumo_pos$B = ""
  } # to be added later
  return(resumo_pos)
}

# Per-species summary for trials older than 23 months WITH thinning.
# Survival and IMA are left blank (not meaningful after thinning).
# NOTE(review): same unused-estH_m / global-'e' issues as resumo_pos24.
resumo_pos24desb <- function(tume.esp, estH_m){
  resumo_pos <- data.frame(N_tume = tume.esp$N_tume[1],
                           Esp = as.character(tume.esp$Esp[1]),
                           I_meses = 0, Parc_m2 = 0, DAPmed = 0, DAPsd = 0,
                           Hmed = 0, Hsd = 0, Hdom = 0, N_fuste = 0,
                           Sobr = 0, G = 0, V = 0, IMA = 0, B = 0)
  tume.esp = tume.esp[, -9]
  tume.esp = cbind(tume.esp, estH_m = estH)
  resumo_pos$I_meses = tume.esp$I_meses[1]
  resumo_pos$Parc_m2 = round(tume.esp$Parc_m2[1], 1)
  resumo_pos$DAPmed = round(mean(na.omit(tume.esp$DAP_cm)), 1)
  resumo_pos$DAPsd = round(sd(na.omit(tume.esp$DAP_cm)), 1)
  resumo_pos$Hmed = round(mean(na.omit(tume.esp$H_m)), 1)
  resumo_pos$Hsd = round(sd(na.omit(tume.esp$H_m)), 1)
  resumo_pos$Hdom = round(mean(na.omit(tume.esp[tume.esp$Cod == 6, names(tume.esp) %in% c("H_m")])), 1)
  resumo_pos$N_fuste = round(length(na.omit(tume.esp$DAP_cm)) * 10000 / tume.esp$Parc_m2[1], 0)
  resumo_pos$Sobr = ""
  resumo_pos$G = round(sum(na.omit(tume.esp$DAP_cm)^2 * pi /40000) * 10000 / tume.esp$Parc_m2[1], 1)
  resumo_pos$V = round(parcVolume(tume.esp), 0)
  resumo_pos$IMA = ""
  if (e %in% ESP.DENSIDADE$Esp){
    resumo_pos$B = round(resumo_pos$V[1] * ESP.DENSIDADE[ESP.DENSIDADE$Esp == e, 2],0)
  } else {
    resumo_pos$B = ""
  }
  return(resumo_pos)
}

# Per-species summary for trials younger than 24 months: only height-based
# statistics are filled in; DAP/volume fields stay blank.
resumo_pre24 <- function(tume.esp){
  resumo_pre <- data.frame(N_tume = tume.esp$N_tume[1],
                           Esp = as.character(tume.esp$Esp[1]),
                           I_meses = 0, Parc_m2 = 0, DAPmed = 0, DAPsd = 0,
                           Hmed = 0, Hsd = 0, Hdom = 0, N_fuste = 0,
                           Sobr = 0, G = 0, V = 0, IMA = 0, B = 0)
  resumo_pre$I_meses = tume.esp$I_meses[1]
  resumo_pre$Parc_m2 = round(tume.esp$Parc_m2[1], 1)
  resumo_pre$DAPmed = ""
  resumo_pre$DAPsd = ""
  resumo_pre$Hmed = round(mean(na.omit(tume.esp$H_m)), 1)
  resumo_pre$Hsd = round(sd(na.omit(tume.esp$H_m)), 1)
  resumo_pre$Hdom = round(mean(na.omit(tume.esp[tume.esp$Cod == 6, names(tume.esp) %in% c("H_m")])), 1) # Stick 6
  resumo_pre$N_fuste = round(length(na.omit(tume.esp$H_m)) * 10000 / tume.esp$Parc_m2[1], 0)
  resumo_pre$Sobr = round(((1 - ((nrow(tume.esp) - nrow(tume.esp[tume.esp$Cod != 1 & tume.esp$Cod != 5,])) / max(tume.esp$N_arv))) * 100), 1)
  resumo_pre$G = ""
  resumo_pre$V = ""
  resumo_pre$IMA = ""
  resumo_pre$B = ""
  return(resumo_pre)
}

# Bar chart of the volume stock per species, saved to <TUME.OUT>/<l>.jpg.
plotVolume <- function(tabela_resumo, l){
  # Sort the volume in descending order
  tabela_resumo = tabela_resumo[with(tabela_resumo, order(-V)), ]
  # Horizontal position of the last x-axis label
  end_point = 0.5 + nrow(tabela_resumo) + nrow(tabela_resumo)-1
  if (nrow(tabela_resumo) > 17) {
    # Store the volume bar chart in a .jpeg file (wider image for many species)
    jpeg(paste(TUME.OUT, sub(".csv","",l,fixed=TRUE), ".jpg", sep=""),
         height = 7, width = nrow(tabela_resumo)*0.6, units = "cm", res = 300)
  } else {
    jpeg(paste(TUME.OUT, sub(".csv","",l,fixed=TRUE), ".jpg", sep=""),
         height = 7, width = 10, units = "cm", res = 300)
  }
  # Add bottom-margin space for the x-axis labels
  par(mar = c(5, 4, 2, 1) + 0.2, mgp = c(2.2,1,0))
  # Default R font
  par(family="sans")
  # Bar chart settings
  barplot(tabela_resumo$V, col="grey50",
          main=paste("TUME ", tabela_resumo$N_tume[1], " - ", tabela_resumo$I_meses[1], " meses", sep=""),
          cex.main = 0.6, cex.axis = 0.6, cex.lab = 0.6,
          ylab = "Volume (m³/ha)",
          ylim = c(0, 1.1 * max(tabela_resumo$V)),
          xlab = "", space = 1)
  # Place and rotate the x-axis labels, skipping the gaps between bars
  text(seq(1.5, end_point, by=2), par("usr")[3]-2,
       srt = 60, # rotate 60 degrees clockwise
       adj = 1, xpd = TRUE,
       labels = tabela_resumo$Esp, cex = 0.6)
  dev.off()
}

# Bar chart of the mean height per species, saved to <TUME.OUT>/<l>.jpg.
plotHmed <- function(tabela_resumo, l){
  # Sort the mean height in descending order
  tabela_resumo = tabela_resumo[with(tabela_resumo, order(-Hmed)), ]
  # Horizontal position of the last x-axis label
  end_point = 0.5 + nrow(tabela_resumo) + nrow(tabela_resumo)-1
  if (nrow(tabela_resumo) > 17) {
    # Store the bar chart in a .jpeg file (wider image for many species)
    jpeg(paste(TUME.OUT, sub(".csv","",l,fixed=TRUE), ".jpg", sep=""),
         height = 7, width = nrow(tabela_resumo)*0.6, units = "cm", res = 300)
  } else {
    jpeg(paste(TUME.OUT, sub(".csv","",l,fixed=TRUE), ".jpg", sep=""),
         height = 7, width = 10, units = "cm", res = 300)
  }
  # Add bottom-margin space for the x-axis labels
  par(mar = c(5, 4, 2, 1) + 0.2, mgp = c(2.2,1,0))
  # Default R font
  par(family="sans")
  # Bar chart settings
  barplot(tabela_resumo$Hmed, col="grey50",
          main=paste("TUME ", tabela_resumo$N_tume[1], " - ", tabela_resumo$I_meses[1], " meses", sep=""),
          cex.main=0.6, cex.axis = 0.6, cex.lab = 0.6,
          ylab = "Altura media (m)",
          ylim = c(0, 1.1 * max(tabela_resumo$Hmed)),
          xlab = "", space = 1) # bar width equal to gap width
  # Place and rotate the x-axis labels, skipping the gaps between bars
  text(seq(1.5, end_point, by=2), par("usr")[3]-0.1,
       srt = 60, # rotate 60 degrees clockwise
       adj = 1, xpd = TRUE,
       labels = tabela_resumo$Esp, cex = 0.6)
  dev.off()
}

### -------------------------------------------------------------------------------
### Global variables

# Folder with measurement files (input files)
TUME.PATH <- paste(getwd(), "/input/", sep = "")
# Folder for storing the output files
TUME.OUT <- paste(getwd(), "/output/", sep = "")
# Folder with reference files (e.g. list of basic densities per genetic material)
TUME.REF <- paste(getwd(), "/referencias/", sep = "")
# Vector with the file names
TUME.FILES <- list.files(TUME.PATH)
# Import the density table (Densidades.csv)
ESP.DENSIDADE <- read.csv(paste(TUME.REF, "Densidades.csv", sep=""))

### -------------------------------------------------------------------------------
### Start of the analysis

# Filter each trial for one species at a time
for (l in TUME.FILES){
  # Import one trial file
  tume = read.csv(paste(TUME.PATH, l, sep=""), sep=",")
  # Vector with the species names in this trial.
  # NOTE(review): levels() assumes Esp was read as a factor (pre-R-4.0
  # stringsAsFactors default); on R >= 4.0 this is NULL -- TODO confirm.
  TUME.ESP <- levels(tume$Esp)
  tabela_resumo = data.frame()
  if (tume$I_meses[1] > 23 & 3 %in% tume$Cod){
    # Older than 23 months WITH thinning (code 3 present).
    for (e in TUME.ESP){
      tume.esp = subset(tume, tume$Esp == e)
      if (nrow(na.omit(tume.esp[,1:6])) > 3 & is.numeric(tume.esp$DAP_cm) == TRUE){
        estH = hipsometrica(tume.esp)
        tabela_resumo = rbind(tabela_resumo, resumo_pos24desb(tume.esp, estH))
      } else {
        # Too few valid rows: emit an all-zero placeholder row.
        sem_dados <- data.frame(N_tume = tume.esp$N_tume[1],
                                Esp = as.character(tume.esp$Esp[1]),
                                I_meses = 0, Parc_m2 = 0, DAPmed = 0, DAPsd = 0,
                                Hmed = 0, Hsd = 0, Hdom = 0, N_fuste = 0,
                                Sobr = 0, G = 0, V = 0, IMA = 0, B = 0)
        tabela_resumo <- rbind(tabela_resumo, sem_dados)
      }
    }
    plotVolume(tabela_resumo, l)
    write.csv(tabela_resumo, file = paste(TUME.OUT,"saida_", l, sep = ""),row.names=FALSE)
  } else if (tume$I_meses[1] > 23 & !(3 %in% tume$Cod)){
    # Older than 23 months WITHOUT thinning.
    for (e in TUME.ESP){
      tume.esp = subset(tume, tume$Esp == e)
      if (nrow(na.omit(tume.esp[,1:6])) > 3 & is.numeric(tume.esp$DAP_cm) == TRUE){
        estH = hipsometrica(tume.esp)
        tabela_resumo = rbind(tabela_resumo, resumo_pos24(tume.esp, estH))
      } else {
        sem_dados <- data.frame(N_tume = tume.esp$N_tume[1],
                                Esp = as.character(tume.esp$Esp[1]),
                                I_meses = 0, Parc_m2 = 0, DAPmed = 0, DAPsd = 0,
                                Hmed = 0, Hsd = 0, Hdom = 0, N_fuste = 0,
                                Sobr = 0, G = 0, V = 0, IMA = 0, B = 0)
        tabela_resumo <- rbind(tabela_resumo, sem_dados)
      }
    }
    plotVolume(tabela_resumo, l)
    write.csv(tabela_resumo, file = paste(TUME.OUT,"saida_", l, sep = ""),row.names=FALSE)
  } else {
    # Younger than 24 months: height-only summary.
    for (e in TUME.ESP){
      tume.esp = subset(tume, tume$Esp == e)
      if (nrow(na.omit(tume.esp[,c(1:5, 7)])) > 3){
        tabela_resumo = rbind(tabela_resumo, resumo_pre24(tume.esp))
      } else {
        sem_dados <- data.frame(N_tume = tume.esp$N_tume[1],
                                Esp = as.character(tume.esp$Esp[1]),
                                I_meses = 0, Parc_m2 = 0, DAPmed = 0, DAPsd = 0,
                                Hmed = 0, Hsd = 0, Hdom = 0, N_fuste = 0,
                                Sobr = 0, G = 0, V = 0, IMA = 0, B = 0)
        tabela_resumo <- rbind(tabela_resumo, sem_dados)
      }
    }
    plotHmed(tabela_resumo, l)
    write.csv(tabela_resumo, file = paste(TUME.OUT, "saida_", l, sep = ""),row.names=FALSE)
  }
}
## Cache the inverse of a matrix. As long as the matrix is unchanged, the
## inverse is computed once (on first request) and served from the cache on
## every later request.

## makeCacheMatrix builds a special "matrix": a list of four closures that
## share the matrix 'x' and its cached inverse:
##   1.) set        -- replace the matrix (and invalidate the cache)
##   2.) get        -- return the matrix
##   3.) setinverse -- store the inverse in the cache
##   4.) getinverse -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
    inv <- NULL
    set <- function(y) {
        x <<- y
        inv <<- NULL  # new matrix: any previously cached inverse is stale
    }
    get <- function() x
    setinverse <- function(inverse) inv <<- inverse
    getinverse <- function() inv
    list(
        set = set,
        get = get,
        setinverse = setinverse,
        getinverse = getinverse
    )
}

## cacheSolve returns the inverse of the special "matrix" produced by
## makeCacheMatrix, reusing the cached value when one is available and
## computing + caching it otherwise. Extra arguments are passed to solve().
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    cached <- x$getinverse()
    if (!is.null(cached)) {
        message("getting cached data")
        return(cached)
    }
    fresh <- solve(x$get(), ...)
    x$setinverse(fresh)
    fresh
}
/cachematrix.R
no_license
CarpenterVII/ProgrammingAssignment2
R
false
false
1,329
r
## This code allows to compute the inverse of the matrix
## and cache the result. If the matrix doesn't change,
## the inverse will be computed only once at the first request
## for the inverse of the matrix and cached for further requests.

## The first function, makeCacheMatrix creates a special "matrix",
## which is really a list containing a function to
## 1.) set the value of the matrix
## 2.) get the value of the matrix
## 3.) set the value of the inverse of the matrix
## 4.) get the value of the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
    # 'i' caches the inverse; NULL means "not computed yet".
    i <- NULL
    set <- function(y) {
        x <<- y
        i <<- NULL  # replacing the matrix invalidates the cache
    }
    get <- function() x
    setinverse <- function(inverse) i <<- inverse
    getinverse <- function() i
    list(set = set, get = get,
         setinverse = setinverse,
         getinverse = getinverse)
}

## This function checks if the inverse is cached: if yes - gets the inverse,
## if not - computes the inverse and caches it using our special "matrix"
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    i <- x$getinverse()
    if(!is.null(i)) {
        message("getting cached data")
        return(i)
    }
    # Cache miss: compute with solve() (extra args forwarded) and store.
    data <- x$get()
    i <- solve(data, ...)
    x$setinverse(i)
    i
}
## Plot 2: line chart of Global Active Power over 2007-02-01/02, saved to
## plot2.png. Assumes the working directory is already established and
## contains household_power_consumption.txt.

## Read data into var data1; '?' marks missing values in this dataset.
data1 <- read.delim("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")

## Subset data to plot: keep only the two target days (Date is d/m/Y text).
data1subset <- data1[data1$Date == "1/2/2007" | data1$Date == "2/2/2007", ]
data1subset$Date <- as.Date(data1subset$Date, format = "%d/%m/%Y")

## Combined date-time column used as the x axis.
dateTime <- data.frame(paste(data1subset$Date, data1subset$Time, sep = " "))
names(dateTime) <- "dateTime"
data1subset <- cbind(data1subset, dateTime)

## Write a 480x480 PNG file.
png(filename = "plot2.png", width = 480, height = 480)

## Print plot data.
plot(data1subset$dateTime, data1subset$Global_active_power,
     ylab = "Global Active Power (kilowatts)", type = "l", xaxt = "n")
lines(data1subset$dateTime, data1subset$Global_active_power,
      ylab = "Global Active Power (kilowatts)")
## NOTE(review): 'at = 1:3' puts the day ticks at x = 1..3, which only lines
## up with the day boundaries if the x axis is index-based -- TODO confirm
## against the rendered output.
axis(1, at = 1:3, labels = c("Thu", "Fri", "Sat"))

## FIX: close the graphics device; the original never called dev.off(), so
## the PNG file was left open and not flushed to disk.
dev.off()
/Plot2.R
no_license
ftsua1985/ExData_Plotting1
R
false
false
807
r
## Plot 2: Global Active Power over 2007-02-01/02, written to plot2.png.
## Assumes working directory is already established.

## Read data into var data1; '?' marks missing values in this dataset.
data1<-read.delim("household_power_consumption.txt",header=T,sep=";",na.strings="?")

## Subsetting data to plot: keep only the two target days (Date is d/m/Y text).
data1subset<-data1[data1$Date=="1/2/2007" | data1$Date=="2/2/2007",]
data1subset$Date<-as.Date(data1subset$Date,format="%d/%m/%Y")
## Combined date-time column used as the x axis.
dateTime<-data.frame(paste(data1subset$Date,data1subset$Time,sep=" "))
names(dateTime)<-"dateTime"
data1subset<-cbind(data1subset,dateTime)

## Write 480x480 PNG file.
png(filename="plot2.png",width=480,height=480)

## Print plot data.
## NOTE(review): 'at=1:3' puts the day ticks at x = 1..3, which only matches
## the day boundaries if the x axis is index-based -- TODO confirm.
plot(data1subset$dateTime,data1subset$Global_active_power,ylab="Global Active Power (kilowatts)",type="l",xaxt="n")
lines(data1subset$dateTime,data1subset$Global_active_power,ylab="Global Active Power (kilowatts)")
axis(1,at=1:3,lab=c("Thu","Fri","Sat"))
## NOTE(review): dev.off() is never called -- the PNG device stays open and
## the file may not be flushed to disk. Likely a bug; confirm and fix.
# the code below was mostly contributed by @egnha from
# https://github.com/yihui/formatR/pull/66

# Deparse an R object to a single-line string: multi-line deparses are
# trimmed and joined with single spaces.
deparse_collapse = function(x) {
  d = deparse(x)
  if (length(d) > 1L) {
    paste(trimws(d, which = 'both'), collapse = ' ')
  } else {
    d
  }
}

# Cumulative printed-width of a call's components when laid out on one line.
# The returned cumulative sums are the only positions at which a line break
# may be placed (breaks fall between arguments, never inside one).
count_tokens = function(.call) {
  if (length(.call) == 1L) {
    # +2 for '()'
    return(nchar(.call) + 2L)
  }
  # +1 for value-delimiting '(', ',', or ')'
  cnt_val = nchar(vapply(.call, deparse_collapse, character(1L))) + 1L
  nms = names(.call[-1L])
  if (is.null(nms)) nms = character(length(.call[-1L]))
  # nchar() of argument names
  cnt_nm = nchar(nms)
  # +3 for ' = ', for argument-value pairs
  cnt_nm[cnt_nm != 0L] = cnt_nm[cnt_nm != 0L] + 3L
  # +1 for space before name, beyond the first argument
  cnt_nm[-1L] = cnt_nm[-1L] + 1L
  # function itself is not a named component
  cnt_nm = c(0L, cnt_nm)
  cumsum(cnt_val + cnt_nm)
}

# counts is a strictly increasing, positive integer vector
# Recursively choose break positions so each output line fits within 'width'
# (subsequent lines shifted by 'indent'). 'track' accumulates
# (counted, counts[1], shift) triples for arguments that cannot fit;
# it is a no-op when fail == 'none' (see tidy_usage).
find_breaks = function(counts, width, indent, track, counted = 0L) {
  if (!length(counts)) {
    return(list(breaks = NULL, overflow = NULL))
  }
  overflow = NULL
  # The first line carries no indent; continuation lines do.
  shift = if (counted == 0L) 0L else indent
  fits = counts - counted + shift <= width
  i = which.min(fits) - 1L
  if (i == 0L) {
    if (fits[[1L]]) {
      # all components of fits_on_line are TRUE
      i = length(counts)
    } else {
      # all components of fits_on_line are FALSE
      overflow = track(counted, counts[1L], shift)
      i = 1L
    }
  }
  post_space = if (i == 1L && counted == 0L) 0L else 1L
  rest = Recall(counts[-(1L:i)], width, indent, track, counts[i] + post_space)
  list(
    breaks = c(counts[i], rest$breaks),
    overflow = c(overflow, rest$overflow)
  )
}

# Build the warning/error text listing the usage fragments that exceed
# 'width'. 'overflow' is a flat vector of (start, end, indent) triples
# produced by find_breaks(); 'text' is the one-line usage string.
# NOTE(review): relies on one_string(), defined elsewhere in this package.
overflow_message = function(overflow, width, indent, text) {
  header = sprintf('Could not fit all lines to width %s (with indent %s):', width, indent)
  idxs = seq_along(overflow)
  # Every third element starts a (start, end, indent) triple.
  args = vapply(idxs[idxs %% 3L == 1L], function(i) {
    l = paste(c(rep(' ', overflow[i + 2L]),
                trimws(substr(text, overflow[i] + 1L, overflow[i + 1L]), which = 'left')),
              collapse = '')
    sprintf('(%s) "%s"', nchar(l), l)
  }, character(1L))
  one_string(c(header, args))
}

# Reformat a captured usage string: parse it as a call, compute legal break
# positions, signal overflow per 'fail', and rejoin the pieces with
# newline + 'indent' spaces.
tidy_usage = function(nm, usg, width, indent, fail) {
  text = paste(trimws(usg, which = 'both'), collapse = ' ')
  # Normalize whitespace between the function name and its '('.
  text = sub(sprintf('^%s\\s*', nm), nm, text)
  expr = parse(text = text)[[1L]]
  # With fail == 'none', overflow tracking is disabled entirely.
  track_overflow = if (fail == 'none') function(...) NULL else base::c
  breaks = find_breaks(count_tokens(expr), width, indent, track_overflow)
  if (length(breaks$overflow)) {
    signal = switch(fail, stop = 'stop', warn = 'warning')
    msg = overflow_message(breaks$overflow, width, indent, text)
    getFromNamespace(signal, 'base')(msg, call. = FALSE)
  }
  breaks = c(0L, breaks$breaks)
  newline = paste(c('\n', character(indent)), collapse = ' ')
  paste(
    vapply(1L:(length(breaks) - 1L), function(i) {
      trimws(substr(text, breaks[i] + 1L, breaks[i + 1L]), which = 'left')
    }, character(1L)),
    collapse = newline
  )
}

#' Show the usage of a function
#'
#' Print the reformatted usage of a function. The arguments of the function are
#' searched by \code{\link{argsAnywhere}()}, so the function can be either
#' exported or non-exported from a package. S3 methods will be marked.
#' @param FUN The function name.
#' @param width The width of the output.
#' @param tidy Whether to reformat the usage code.
#' @param output Whether to print the output to the console (via
#'   \code{\link{cat}()}).
#' @param indent.by.FUN Whether to indent subsequent lines by the width of the
#'   function name (see \dQuote{Details}).
#' @param fail A character string that represents the action taken when the
#'   width constraint is unfulfillable. "warn" and "stop" will signal warnings
#'   and errors, while "none" will do nothing.
#' @return Reformatted usage code of a function, in character strings
#'   (invisible).
#' @details Line breaks in the output occur between arguments. In particular,
#'   default values of arguments will not be split across lines.
#'
#'   When \code{indent.by.FUN} is \code{FALSE}, indentation is set by the option
#'   \code{\link{getOption}("formatR.indent", 4L)}, the default value of the
#'   \code{indent} argument of \code{\link{tidy_source}()}.
#' @seealso \code{\link{tidy_source}()}
#' @export
#' @examples library(formatR)
#' usage(var)
#'
#' usage(plot)
#'
#' usage(plot.default)  # default method
#' usage('plot.lm')  # on the 'lm' class
#'
#' usage(usage)
#'
#' usage(barplot.default, width = 60)  # output lines have 60 characters or less
#'
#' # indent by width of 'barplot('
#' usage(barplot.default, width = 60, indent.by.FUN = TRUE)
#'
#' \dontrun{
#' # a warning is raised because the width constraint is unfulfillable
#' usage(barplot.default, width = 30)
#' }
usage = function(FUN, width = getOption('width'), tidy = TRUE, output = TRUE,
                 indent.by.FUN = FALSE, fail = c('warn', 'stop', 'none')) {
  fail = match.arg(fail)
  fn = as.character(substitute(FUN))
  # Capture the printed argument list of FUN (argsAnywhere also finds
  # non-exported functions when FUN was given as a name/string).
  res = capture.output(if (is.function(FUN)) args(FUN) else {
    do.call(argsAnywhere, list(fn))
  })
  if (identical(res, 'NULL')) return()
  res[1] = substring(res[1], 9)  # rm 'function ' in the beginning
  isS3 = FALSE
  # 'pkg::fun' / 'pkg:::fun' deparses to a length-3 vector; keep the name.
  if (length(fn) == 3 && (fn[1] %in% c('::', ':::'))) fn = fn[3]
  if (grepl('.', fn, fixed = TRUE)) {
    # A dotted name may be an S3 method: try every generic/class split.
    n = length(parts <- strsplit(fn, '.', fixed = TRUE)[[1]])
    for (i in 2:n) {
      gen = paste(parts[1L:(i - 1)], collapse = ".")
      cl = paste(parts[i:n], collapse = ".")
      if (gen == "" || cl == "") next
      if (!is.null(f <- getS3method(gen, cl, TRUE)) && !is.null(environment(f))) {
        res[1] = paste(gen, res[1])
        header = if (cl == 'default') '## Default S3 method:' else
          sprintf("## S3 method for class '%s'", cl)
        res = c(header, res)
        isS3 = TRUE
        break
      }
    }
  }
  if (!isS3) res[1] = paste(fn, res[1])
  if ((n <- length(res)) > 1 && res[n] == 'NULL') res = res[-n]  # rm last element 'NULL'
  if (!tidy) {
    if (output) cat(res, sep = '\n')
    return(invisible(res))
  }
  nm = if (isS3) gen else fn
  usg = if (isS3) res[-1L] else res
  indent = if (indent.by.FUN) {
    # +1 for '('
    nchar(nm) + 1L
  } else {
    # Default indent for tidy_source()
    getOption('formatR.indent', 4L)
  }
  out = tidy_usage(nm, usg, width, indent, fail)
  # Re-attach the '## S3 method ...' header above the tidied usage.
  if (isS3) out = c(res[1L], out)
  if (output) cat(out, sep = '\n')
  invisible(out)
}
/R/usage.R
no_license
cderv/formatR
R
false
false
6,532
r
# the code below was mostly contributed by @egnha from
# https://github.com/yihui/formatR/pull/66

# Deparse an expression to a single string; multi-line deparse results are
# trimmed and joined with single spaces.
deparse_collapse = function(x) {
  d = deparse(x)
  if (length(d) > 1L) {
    paste(trimws(d, which = 'both'), collapse = ' ')
  } else {
    d
  }
}

# Cumulative character counts of the components of a call object, i.e. the
# column at which each argument would end if the whole call were printed on
# one line. Used by find_breaks() to decide where line breaks may go.
count_tokens = function(.call) {
  if (length(.call) == 1L) {
    # +2 for '()'
    return(nchar(.call) + 2L)
  }
  # +1 for value-delimiting '(', ',', or ')'
  cnt_val = nchar(vapply(.call, deparse_collapse, character(1L))) + 1L
  nms = names(.call[-1L])
  if (is.null(nms)) nms = character(length(.call[-1L]))
  # nchar() of argument names
  cnt_nm = nchar(nms)
  # +3 for ' = ', for argument-value pairs
  cnt_nm[cnt_nm != 0L] = cnt_nm[cnt_nm != 0L] + 3L
  # +1 for space before name, beyond the first argument
  cnt_nm[-1L] = cnt_nm[-1L] + 1L
  # function itself is not a named component
  cnt_nm = c(0L, cnt_nm)
  cumsum(cnt_val + cnt_nm)
}

# counts is a strictly increasing, positive integer vector.
# Recursively chooses break positions so that each output line stays within
# `width`; continuation lines are shifted right by `indent`. `track` records
# overflow triples (counted, counts[1L], shift) when even a single argument
# cannot fit; `counted` is the count already consumed by previous lines.
find_breaks = function(counts, width, indent, track, counted = 0L) {
  if (!length(counts)) {
    return(list(breaks = NULL, overflow = NULL))
  }
  overflow = NULL
  shift = if (counted == 0L) 0L else indent
  fits = counts - counted + shift <= width
  # which.min() finds the first FALSE, so i is the last index that fits
  i = which.min(fits) - 1L
  if (i == 0L) {
    if (fits[[1L]]) {
      # all components of fits_on_line are TRUE
      i = length(counts)
    } else {
      # all components of fits_on_line are FALSE
      overflow = track(counted, counts[1L], shift)
      i = 1L
    }
  }
  post_space = if (i == 1L && counted == 0L) 0L else 1L
  rest = Recall(counts[-(1L:i)], width, indent, track, counts[i] + post_space)
  list(
    breaks = c(counts[i], rest$breaks),
    overflow = c(overflow, rest$overflow)
  )
}

# Builds the warning/error text listing each overflowing line (with its
# actual width) when the width constraint cannot be satisfied.
overflow_message = function(overflow, width, indent, text) {
  header = sprintf('Could not fit all lines to width %s (with indent %s):', width, indent)
  idxs = seq_along(overflow)
  # overflow entries come in triples recorded by track() in find_breaks(),
  # hence the stride-3 indexing
  args = vapply(idxs[idxs %% 3L == 1L], function(i) {
    l = paste(c(rep(' ', overflow[i + 2L]),
                trimws(substr(text, overflow[i] + 1L, overflow[i + 1L]), which = 'left')),
              collapse = '')
    sprintf('(%s) \"%s\"', nchar(l), l)
  }, character(1L))
  one_string(c(header, args))
}

# Reformat a usage string `usg` of function `nm` so lines fit `width`, with
# continuation lines indented by `indent`; `fail` controls what happens when
# the constraint is unfulfillable ('warn', 'stop', or 'none').
tidy_usage = function(nm, usg, width, indent, fail) {
  text = paste(trimws(usg, which = 'both'), collapse = ' ')
  text = sub(sprintf('^%s\\s*', nm), nm, text)
  expr = parse(text = text)[[1L]]
  track_overflow = if (fail == 'none') function(...) NULL else base::c
  breaks = find_breaks(count_tokens(expr), width, indent, track_overflow)
  if (length(breaks$overflow)) {
    signal = switch(fail, stop = 'stop', warn = 'warning')
    msg = overflow_message(breaks$overflow, width, indent, text)
    getFromNamespace(signal, 'base')(msg, call. = FALSE)
  }
  breaks = c(0L, breaks$breaks)
  # '\n' followed by `indent` spaces, used to join the wrapped segments
  newline = paste(c('\n', character(indent)), collapse = ' ')
  paste(
    vapply(1L:(length(breaks) - 1L), function(i) {
      trimws(substr(text, breaks[i] + 1L, breaks[i + 1L]), which = 'left')
    }, character(1L)),
    collapse = newline
  )
}

#' Show the usage of a function
#'
#' Print the reformatted usage of a function. The arguments of the function are
#' searched by \code{\link{argsAnywhere}()}, so the function can be either
#' exported or non-exported from a package. S3 methods will be marked.
#' @param FUN The function name.
#' @param width The width of the output.
#' @param tidy Whether to reformat the usage code.
#' @param output Whether to print the output to the console (via
#'   \code{\link{cat}()}).
#' @param indent.by.FUN Whether to indent subsequent lines by the width of the
#'   function name (see \dQuote{Details}).
#' @param fail A character string that represents the action taken when the
#'   width constraint is unfulfillable. "warn" and "stop" will signal warnings
#'   and errors, while "none" will do nothing.
#' @return Reformatted usage code of a function, in character strings
#'   (invisible).
#' @details Line breaks in the output occur between arguments. In particular,
#'   default values of arguments will not be split across lines.
#'
#'   When \code{indent.by.FUN} is \code{FALSE}, indentation is set by the option
#'   \code{\link{getOption}("formatR.indent", 4L)}, the default value of the
#'   \code{indent} argument of \code{\link{tidy_source}()}.
#' @seealso \code{\link{tidy_source}()}
#' @export
#' @examples library(formatR)
#' usage(var)
#'
#' usage(plot)
#'
#' usage(plot.default)  # default method
#' usage('plot.lm')  # on the 'lm' class
#'
#' usage(usage)
#'
#' usage(barplot.default, width = 60)  # output lines have 60 characters or less
#'
#' # indent by width of 'barplot('
#' usage(barplot.default, width = 60, indent.by.FUN = TRUE)
#'
#' \dontrun{
#' # a warning is raised because the width constraint is unfulfillable
#' usage(barplot.default, width = 30)
#' }
usage = function(FUN, width = getOption('width'), tidy = TRUE, output = TRUE,
                 indent.by.FUN = FALSE, fail = c('warn', 'stop', 'none')) {
  fail = match.arg(fail)
  fn = as.character(substitute(FUN))
  # capture the printed argument list; argsAnywhere() also finds
  # non-exported functions when FUN was given as a string
  res = capture.output(if (is.function(FUN)) args(FUN) else {
    do.call(argsAnywhere, list(fn))
  })
  if (identical(res, 'NULL')) return()
  res[1] = substring(res[1], 9)  # rm 'function ' in the beginning
  isS3 = FALSE
  # strip the package part of pkg::fun / pkg:::fun
  if (length(fn) == 3 && (fn[1] %in% c('::', ':::'))) fn = fn[3]
  if (grepl('.', fn, fixed = TRUE)) {
    # try each split of 'gen.cl' around the dots to detect an S3 method
    n = length(parts <- strsplit(fn, '.', fixed = TRUE)[[1]])
    for (i in 2:n) {
      gen = paste(parts[1L:(i - 1)], collapse = ".")
      cl = paste(parts[i:n], collapse = ".")
      if (gen == "" || cl == "") next
      if (!is.null(f <- getS3method(gen, cl, TRUE)) && !is.null(environment(f))) {
        res[1] = paste(gen, res[1])
        header = if (cl == 'default') '## Default S3 method:' else
          sprintf("## S3 method for class '%s'", cl)
        res = c(header, res)
        isS3 = TRUE
        break
      }
    }
  }
  if (!isS3) res[1] = paste(fn, res[1])
  if ((n <- length(res)) > 1 && res[n] == 'NULL') res = res[-n]  # rm last element 'NULL'
  if (!tidy) {
    if (output) cat(res, sep = '\n')
    return(invisible(res))
  }
  nm = if (isS3) gen else fn
  usg = if (isS3) res[-1L] else res
  indent = if (indent.by.FUN) {
    # +1 for '('
    nchar(nm) + 1L
  } else {
    # Default indent for tidy_source()
    getOption('formatR.indent', 4L)
  }
  out = tidy_usage(nm, usg, width, indent, fail)
  if (isS3) out = c(res[1L], out)
  if (output) cat(out, sep = '\n')
  invisible(out)
}
\name{statGen}
\alias{statGen}
\title{
Individual genotype statistics for an R/qtl cross object
}
\description{
Individual genotype statistics for the current linkage map order of an R/qtl cross object
}
\usage{
statGen(cross, chr, bychr = TRUE, stat.type = c("xo","dxo","miss"),
     id = "Genotype")
}
\arguments{
  \item{cross}{
An R/qtl \code{cross} object with class structure \code{"bc",
"dh","riself","bcsft"}. (see \code{?mstmap.cross} for more details.)
}
 \item{chr}{
Character vector of linkage group names used for subsetting the linkage
map.
}
\item{bychr}{
Logical vector determining whether statistics should be plotted by
chromosome (see Details).
}
\item{stat.type}{
Character string of any combination of \code{"xo"}, \code{"dxo"} and
\code{"miss"}. \code{"xo"} calculates the number of crossovers,
\code{"dxo"} calculates the number of double crossovers and \code{"miss"}
calculates the number of missing values.
}
\item{id}{
Character string determining the column of \code{cross$pheno} that
contains the genotype names.
}
}
\details{
This function is used in \code{profileGen} to plot any combination of
returned linkage map statistics on a single graphical display.
}
\value{
A list with elements named by the \code{stat.type} used in the call. If
\code{bychr = TRUE} then each element is a data frame of statistics with
columns named by the linkage groups. If \code{bychr = FALSE} then each
element is a vector of statistics named by the \code{stat.type}.
}
\references{
Taylor, J., Butler, D. (2017) R Package ASMap: Efficient Genetic Linkage
Map Construction and Diagnosis. Journal of Statistical Software,
\bold{79}(6), 1--29.
}
\author{
Julian Taylor
}
\seealso{
\code{\link{profileGen}}
}
\examples{
data(mapDH, package = "ASMap")

## produce all genotype crossover and double crossover statistics

sg <- statGen(mapDH, stat.type = c("xo","dxo"))
}
\keyword{misc}
/man/statGen.Rd
no_license
kbroman/ASMap
R
false
false
1,964
rd
\name{statGen} \alias{statGen} \title{ Individual genotype statistics for an R/qtl cross object } \description{ Individual genotype statistics for the current linkage map order of and R/qtl cross object } \usage{ statGen(cross, chr, bychr = TRUE, stat.type = c("xo","dxo","miss"), id = "Genotype") } \arguments{ \item{cross}{ An R/qtl \code{cross} object with class structure \code{"bc", "dh","riself","bcsft"}. (see \code{?mstmap.cross} for more details.) } \item{chr}{ Character vector of linkage group names used for subsetting the linkage map. } \item{bychr}{ Logical vector determining whether statistics should be plotted by chromosome (see Details). } \item{stat.type}{ Character string of any combination of \code{"xo"} or \code{"dxo"} or both. \code{"miss"}. \code{"xo"} calculates the number of crossovers, \code{"dxo"} calculates the number of double crossover and \code{"miss"} calculates the number of missing values. } \item{id}{ Character string determining the column of \code{cross$pheno} that contains the genotype names. } } \details{ This function is used in \code{profileGen} to plot any combination of returned linkage map statistics on a single graphical display. } \value{ A list with elements named by the \code{stat.type} used in the call. If \code{bychr = TRUE} then each element is a data frame of statistics with columns named by the linkage groups. If \code{bychr = FALSE} then each element is a vector of statistics named by the \code{stat.type}. } \references{ Taylor, J., Butler, D. (2017) R Package ASMap: Efficient Genetic Linkage Map Construction and Diagnosis. Journal of Statistical Software, \bold{79}(6), 1--29. } \author{ Julian Taylor } \seealso{ \code{\link{profileGen}} } \examples{ data(mapDH, package = "ASMap") ## produce all genotype crossover and double crossover statistics sg <- statGen(mapDH, stat.type = c("xo","dxo")) } \keyword{misc}
# Bayesian R-squared experiment: compute R2 per posterior draw from a blm
# fit by hand, then compare with the rstanarm-based definition.
#
# NOTE(review): the original script used `fit_blm` before it was created
# and referenced an undefined object `bfit`; the sections are reordered so
# the file runs top to bottom, and `bfit` is corrected to `fit_blm`.

library(blm)
library(dplyr)

# Center the predictors of interest.
data("mtcars")
df <- mtcars %>%
  select(mpg, wt, qsec, am) %>%
  mutate(wt = wt - mean(wt),
         qsec = qsec - mean(qsec),
         am = am - mean(am))
row.names(df) <- 1:nrow(df)

blm_setup()
# NOTE(review): the model is fit on raw mtcars, not the centered df built
# above -- confirm which was intended.
fit_blm <- blm("mpg ~ wt + qsec + am", data = mtcars) %>%
  set_sampling_options(chains = 1, iter = 10000, burn = 2000, thinning = 20) %>%
  sample_posterior()

# Sampling history (trace) plot.
plot(fit_blm, "history")

# Get posterior samples; last column is sigma, the rest are coefficients.
w <- do.call(rbind.data.frame, fit_blm$posterior$samples)

# Split w and sigma
sigma <- w[, 4]
w <- w[, -4]

# Linear predictor for every posterior draw (one column per draw).
# (was `bfit$input$X`: `bfit` is never defined; the fitted object is fit_blm)
lincom <- fit_blm$input$X %*% t(w)

# R2 for one posterior draw: var(fitted) / (var(fitted) + sigma^2).
# Renamed from bayes_R2 so it is not shadowed by the rstanarm version below.
bayes_R2_draw <- function(lincom_current, sigma) {
  # Variance in lincom current
  vlc <- var(lincom_current)
  # R2
  vlc / (vlc + sigma^2)
}

# Calculate for each posterior draw.
r2v <- rep(0, ncol(lincom))
for (i in seq_along(r2v)) {
  r2v[i] <- bayes_R2_draw(lincom[, i], sigma[i])
}

# rstanarm version of the same quantity (Gelman et al. 2018 definition).
bayes_R2 <- function(fit) {
  y_pred <- rstanarm::posterior_linpred(fit)
  var_fit <- apply(y_pred, 1, var)
  var_res <- as.matrix(fit, pars = c("sigma"))^2
  var_fit / (var_fit + var_res)
}

library(rstanarm)
# NOTE(review): `attitude ~ .` with data = df cannot work (df has no
# `attitude` column); presumably `data = attitude` was intended -- confirm.
(fit <- stan_lm(attitude ~ ., data = df, prior = NULL,
                # the next line is only to make the example go fast enough
                chains = 1, iter = 4000, seed = 12345))
rsq_bayes <- bayes_R2(fit)
hist(rsq_bayes)
/experiments/3. R-squared/R2.R
permissive
anhnguyendepocen/blm
R
false
false
1,362
r
# Bayesian R-squared experiment: compute R2 per posterior draw from a blm
# fit by hand, then compare with the rstanarm-based definition.
#
# NOTE(review): the original script used `fit_blm` before it was created
# and referenced an undefined object `bfit`; the sections are reordered so
# the file runs top to bottom, and `bfit` is corrected to `fit_blm`.

library(blm)
library(dplyr)

# Center the predictors of interest.
data("mtcars")
df <- mtcars %>%
  select(mpg, wt, qsec, am) %>%
  mutate(wt = wt - mean(wt),
         qsec = qsec - mean(qsec),
         am = am - mean(am))
row.names(df) <- 1:nrow(df)

blm_setup()
# NOTE(review): the model is fit on raw mtcars, not the centered df built
# above -- confirm which was intended.
fit_blm <- blm("mpg ~ wt + qsec + am", data = mtcars) %>%
  set_sampling_options(chains = 1, iter = 10000, burn = 2000, thinning = 20) %>%
  sample_posterior()

# Sampling history (trace) plot.
plot(fit_blm, "history")

# Get posterior samples; last column is sigma, the rest are coefficients.
w <- do.call(rbind.data.frame, fit_blm$posterior$samples)

# Split w and sigma
sigma <- w[, 4]
w <- w[, -4]

# Linear predictor for every posterior draw (one column per draw).
# (was `bfit$input$X`: `bfit` is never defined; the fitted object is fit_blm)
lincom <- fit_blm$input$X %*% t(w)

# R2 for one posterior draw: var(fitted) / (var(fitted) + sigma^2).
# Renamed from bayes_R2 so it is not shadowed by the rstanarm version below.
bayes_R2_draw <- function(lincom_current, sigma) {
  # Variance in lincom current
  vlc <- var(lincom_current)
  # R2
  vlc / (vlc + sigma^2)
}

# Calculate for each posterior draw.
r2v <- rep(0, ncol(lincom))
for (i in seq_along(r2v)) {
  r2v[i] <- bayes_R2_draw(lincom[, i], sigma[i])
}

# rstanarm version of the same quantity (Gelman et al. 2018 definition).
bayes_R2 <- function(fit) {
  y_pred <- rstanarm::posterior_linpred(fit)
  var_fit <- apply(y_pred, 1, var)
  var_res <- as.matrix(fit, pars = c("sigma"))^2
  var_fit / (var_fit + var_res)
}

library(rstanarm)
# NOTE(review): `attitude ~ .` with data = df cannot work (df has no
# `attitude` column); presumably `data = attitude` was intended -- confirm.
(fit <- stan_lm(attitude ~ ., data = df, prior = NULL,
                # the next line is only to make the example go fast enough
                chains = 1, iter = 4000, seed = 12345))
rsq_bayes <- bayes_R2(fit)
hist(rsq_bayes)
# Title     : Bar plot of car types (textbook p.139 example)
# Objective : Draw a bar plot of the Type frequencies in MASS::Cars93
# Created by: coeat
# Created on: 2021/04/20

library(MASS)

print('START**********')

# Frequency table of car types from the Cars93 data.
tab <- with(Cars93, table(Type))
# (tab <- with(Cars93, table(Type))) -- wrapping the assignment in
# parentheses would also print the value in one statement.
# tab <- table(Cars93$Type)  # equivalent to the assignment above

# Renamed from `head`, which masked base::head().
type_labels <- c('Compact', 'Large', 'Midsize', 'Small', 'Sporty', 'Van')

barplot(tab,
        main = "Type of Car",
        xlab = "Type",
        ylab = "Number of Car",
        col = 1:6,
        # `legend =` only worked via partial matching; the argument is
        # legend.text, so spell it out.
        legend.text = type_labels,
        names.arg = type_labels)
/139p-barplot.R
no_license
kukaro/R-Computing-Study
R
false
false
570
r
# Title     : Bar plot of car types (textbook p.139 example)
# Objective : Draw a bar plot of the Type frequencies in MASS::Cars93
# Created by: coeat
# Created on: 2021/04/20

library(MASS)

print('START**********')

# Frequency table of car types from the Cars93 data.
tab <- with(Cars93, table(Type))
# (tab <- with(Cars93, table(Type))) -- wrapping the assignment in
# parentheses would also print the value in one statement.
# tab <- table(Cars93$Type)  # equivalent to the assignment above

# Renamed from `head`, which masked base::head().
type_labels <- c('Compact', 'Large', 'Midsize', 'Small', 'Sporty', 'Van')

barplot(tab,
        main = "Type of Car",
        xlab = "Type",
        ylab = "Number of Car",
        col = 1:6,
        # `legend =` only worked via partial matching; the argument is
        # legend.text, so spell it out.
        legend.text = type_labels,
        names.arg = type_labels)
############################################################################
###
### Some general comments on the structure of the scripts:
### Each module (00, 01 ...) builds on the previous ones and does NOT work
### standalone - this avoids redundancies. Data, functions etc. will be
### carried over from one module to another. Saving and loading of interim
### outputs is done into path2temp. The data input comes directly from
### a CSV file or the Natura2000 data portal, so loading local files
### is not necessary.
###
### Overall structure of the modules is:
###
### - 00_initialize_directories_and_scripts.R
### - 01_load_libraries_and_functions.R
### - 02_load_data.R
### - 03_IUCN_analysis.R
### - 04_core_analysis.R
###
### Authors: CH, MB
############################################################################

############################################################################
### 00.1. set and create the working and temporary directories           ###
############################################################################
workingDir <- getwd()
tempDir <- "tempDirectory"

path2wd <- file.path(workingDir)
path2temp <- file.path(workingDir, tempDir)

# The original if/else assigned the same two paths in both branches and
# only differed in whether dir.create() ran, so it reduces to a single
# check. dir.exists() is used because the target is a directory.
if (!dir.exists(path2temp)) {
  dir.create(path2temp)
}

############################################################################
### 00.2. source all relevant R scripts
############################################################################
### helper function: infix string concatenation, "a" %+% "b" gives "ab"
"%+%" <- function(x, y) paste(x, y, sep = "")

### load libraries, functions and google sheets
### (timings in the comments are rough wall-clock estimates)
ptm <- proc.time(); source(path2wd %+% "/01_load_libraries_and_functions.R"); proc.time() - ptm # ca. 30 seconds
ptm <- proc.time(); source(path2wd %+% "/02_load_data.R"); proc.time() - ptm # ca. 100 seconds
ptm <- proc.time(); source(path2wd %+% "/03_IUCN_analysis.R"); proc.time() - ptm # 300 seconds
ptm <- proc.time(); source(path2wd %+% "/04_core_analysis.R"); proc.time() - ptm # 190 seconds
/00_initialize_directories_and_scripts.R
no_license
Katatrepsis/Ziv_et_al_N2000
R
false
false
2,200
r
############################################################################
###
### Some general comments on the structure of the scripts:
### Each module (00, 01 ...) builds on the previous ones and does NOT work
### standalone - this avoids redundancies. Data, functions etc. will be
### carried over from one module to another. Saving and loading of interim
### outputs is done into path2temp. The data input comes directly from
### a CSV file or the Natura2000 data portal, so loading local files
### is not necessary.
###
### Overall structure of the modules is:
###
### - 00_initialize_directories_and_scripts.R
### - 01_load_libraries_and_functions.R
### - 02_load_data.R
### - 03_IUCN_analysis.R
### - 04_core_analysis.R
###
### Authors: CH, MB
############################################################################

############################################################################
### 00.1. set and create the working and temporary directories           ###
############################################################################
workingDir <- getwd()
tempDir <- "tempDirectory"

path2wd <- file.path(workingDir)
path2temp <- file.path(workingDir, tempDir)

# The original if/else assigned the same two paths in both branches and
# only differed in whether dir.create() ran, so it reduces to a single
# check. dir.exists() is used because the target is a directory.
if (!dir.exists(path2temp)) {
  dir.create(path2temp)
}

############################################################################
### 00.2. source all relevant R scripts
############################################################################
### helper function: infix string concatenation, "a" %+% "b" gives "ab"
"%+%" <- function(x, y) paste(x, y, sep = "")

### load libraries, functions and google sheets
### (timings in the comments are rough wall-clock estimates)
ptm <- proc.time(); source(path2wd %+% "/01_load_libraries_and_functions.R"); proc.time() - ptm # ca. 30 seconds
ptm <- proc.time(); source(path2wd %+% "/02_load_data.R"); proc.time() - ptm # ca. 100 seconds
ptm <- proc.time(); source(path2wd %+% "/03_IUCN_analysis.R"); proc.time() - ptm # 300 seconds
ptm <- proc.time(); source(path2wd %+% "/04_core_analysis.R"); proc.time() - ptm # 190 seconds
# Restore the project library recorded in renv.lock.
renv::restore()

# Pin Hugo to 0.74.3: when the active version differs, install that exact
# release into renv/bin so blogdown uses it.
hugo_target <- numeric_version("0.74.3")
if (blogdown::hugo_version() != hugo_target) {
  options(blogdown.hugo.dir = normalizePath("renv/bin"))
  blogdown::install_hugo(version = "v0.74.3")
}
/install.R
no_license
TranSophie/sophietran-website
R
false
false
184
r
# Restore the project library recorded in renv.lock.
renv::restore()

# Pin Hugo to 0.74.3: when the active version differs, install that exact
# release into renv/bin so blogdown uses it.
hugo_target <- numeric_version("0.74.3")
if (blogdown::hugo_version() != hugo_target) {
  options(blogdown.hugo.dir = normalizePath("renv/bin"))
  blogdown::install_hugo(version = "v0.74.3")
}
context("Data stores")

test_that("file store", {
  st <- file_store$new()

  # Put a real file on disk so the store has something to track.
  path <- tempdir()
  file <- file.path(path, "test.txt")
  writeLines(letters, file)

  # Existing file is visible and hashed like tools::md5sum().
  # (Modern testthat expectations replace the first-edition
  # expect_that(x, is_true()) / equals() / throws_error() style, which was
  # deprecated and then removed from testthat.)
  expect_true(st$exists(file))
  h <- st$get_hash(file)
  expect_equal(h, unname(tools::md5sum(file)))

  # Unknown keys: hash lookup errors, existence check returns FALSE.
  expect_error(st$get_hash("not/in/store"), "not found in file store")
  expect_false(st$exists("not/in/store"))

  # Deleting removes the file from the store.
  st$del(file)
  expect_false(st$exists(file))
})
/tests/testthat/test-store.R
no_license
aammd/remake
R
false
false
512
r
context("Data stores")

test_that("file store", {
  st <- file_store$new()

  # Put a real file on disk so the store has something to track.
  path <- tempdir()
  file <- file.path(path, "test.txt")
  writeLines(letters, file)

  # Existing file is visible and hashed like tools::md5sum().
  # (Modern testthat expectations replace the first-edition
  # expect_that(x, is_true()) / equals() / throws_error() style, which was
  # deprecated and then removed from testthat.)
  expect_true(st$exists(file))
  h <- st$get_hash(file)
  expect_equal(h, unname(tools::md5sum(file)))

  # Unknown keys: hash lookup errors, existence check returns FALSE.
  expect_error(st$get_hash("not/in/store"), "not found in file store")
  expect_false(st$exists("not/in/store"))

  # Deleting removes the file from the store.
  st$del(file)
  expect_false(st$exists(file))
})
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fit.meta.GMCM.R \name{fit.meta.GMCM} \alias{fit.meta.GMCM} \alias{fit.meta.gmcm} \alias{fit.special.GMCM} \alias{fit.special.gmcm} \title{Estimate GMCM parameters of the special model} \usage{ fit.meta.GMCM( u, init.par, method = c("NM", "SANN", "L-BFGS", "L-BFGS-B", "PEM"), max.ite = 1000, verbose = TRUE, positive.rho = TRUE, trace.theta = FALSE, ... ) fit.special.GMCM( u, init.par, method = c("NM", "SANN", "L-BFGS", "L-BFGS-B", "PEM"), max.ite = 1000, verbose = TRUE, positive.rho = TRUE, trace.theta = FALSE, ... ) } \arguments{ \item{u}{An \code{n} by \code{d} matrix of test statistics. Rows correspond to features and columns to experiments. Larger values are assumed to be indicative of stronger evidence and reproducibility.} \item{init.par}{A 4-dimensional vector of the initial parameters where, \code{init.par[1]} is the mixture proportion of spurious signals, \code{init.par[2]} is the mean, \code{init.par[3]} is the standard deviation, \code{init.par[4]} is the correlation.} \item{method}{A character vector of length \eqn{1}{1}. The optimization method used. Should be either \code{"NM"}, \code{"SANN"}, \code{"L-BFGS"}, \code{"L-BFGS-B"}, or \code{"PEM"} which are abbreviations of Nelder-Mead, Simulated Annealing, limited-memory quasi-Newton method, limited-memory quasi-Newton method with box constraints, and the pseudo EM algorithm, respectively. Default is \code{"NM"}. See \code{\link{optim}} for further details.} \item{max.ite}{The maximum number of iterations. If the \code{method} is \code{"SANN"} this is the number of iterations as there is no other stopping criterion. (See \code{\link{optim}})} \item{verbose}{Logical. If \code{TRUE}, the log-likelihood values are printed.} \item{positive.rho}{\code{logical}. If \code{TRUE}, the correlation parameter is restricted to be positive.} \item{trace.theta}{\code{logical}. 
Extra convergence information is appended as a list to the output returned if \code{TRUE}. The exact behavior is dependent on the value of \code{method}. If \code{method} equals \code{"PEM"}, the argument is passed to \code{trace.theta} in \code{\link{PseudoEMAlgorithm}}. Otherwise it is passed to the control argument \code{trace} in \code{\link{optim}}.} \item{\dots}{Arguments passed to the \code{control}-list in \code{\link{optim}} or \code{\link{PseudoEMAlgorithm}} if \code{method} is \code{"PEM"}.} } \value{ A vector \code{par} of length 4 of the fitted parameters where \code{par[1]} is the probability of being from the first (or null) component, \code{par[2]} is the mean, \code{par[3]} is the standard deviation, and \code{par[4]} is the correlation. If \code{trace.theta} is \code{TRUE}, then a \code{list} is returned where the first entry is as described above and the second entry is the trace information (dependent of \code{method}.). } \description{ This function estimates the parameters of the special restricted Gaussian mixture copula model (GMCM) proposed by Li et. al. (2011). It is used to perform reproducibility (or meta) analysis using GMCMs. It features various optimization routines to identify the maximum likelihood estimate of the special GMCMs. } \details{ The \code{"L-BFGS-B"} method does not perform a transformation of the parameters. \code{fit.special.GMCM} is simply an alias of \code{fit.meta.gmcm}. } \note{ Simulated annealing is strongly dependent on the initial values and the cooling scheme. See \code{\link{optim}} for further details. 
} \examples{ set.seed(1) # True parameters true.par <- c(0.9, 2, 0.7, 0.6) # Simulation of data from the GMCM model data <- SimulateGMCMData(n = 1000, par = true.par) uhat <- Uhat(data$u) # Ranked observed data init.par <- c(0.5, 1, 0.5, 0.9) # Initial parameters # Optimization with Nelder-Mead nm.par <- fit.meta.GMCM(uhat, init.par = init.par, method = "NM") \dontrun{ # Comparison with other optimization methods # Optimization with simulated annealing sann.par <- fit.meta.GMCM(uhat, init.par = init.par, method = "SANN", max.ite = 3000, temp = 1) # Optimization with the Pseudo EM algorithm pem.par <- fit.meta.GMCM(uhat, init.par = init.par, method = "PEM") # The estimates agree nicely rbind("True" = true.par, "Start" = init.par, "NM" = nm.par, "SANN" = sann.par, "PEM" = pem.par) } # Get estimated cluster Khat <- get.IDR(x = uhat, par = nm.par)$Khat plot(uhat, col = Khat, main = "Clustering\nIDR < 0.05") } \references{ Li, Q., Brown, J. B. J. B., Huang, H., & Bickel, P. J. (2011). Measuring reproducibility of high-throughput experiments. The Annals of Applied Statistics, 5(3), 1752-1779. doi:10.1214/11-AOAS466 } \seealso{ \code{\link{optim}} } \author{ Anders Ellern Bilgrau <anders.ellern.bilgrau@gmail.com> }
/man/fit.meta.GMCM.Rd
no_license
AEBilgrau/GMCM
R
false
true
4,859
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fit.meta.GMCM.R \name{fit.meta.GMCM} \alias{fit.meta.GMCM} \alias{fit.meta.gmcm} \alias{fit.special.GMCM} \alias{fit.special.gmcm} \title{Estimate GMCM parameters of the special model} \usage{ fit.meta.GMCM( u, init.par, method = c("NM", "SANN", "L-BFGS", "L-BFGS-B", "PEM"), max.ite = 1000, verbose = TRUE, positive.rho = TRUE, trace.theta = FALSE, ... ) fit.special.GMCM( u, init.par, method = c("NM", "SANN", "L-BFGS", "L-BFGS-B", "PEM"), max.ite = 1000, verbose = TRUE, positive.rho = TRUE, trace.theta = FALSE, ... ) } \arguments{ \item{u}{An \code{n} by \code{d} matrix of test statistics. Rows correspond to features and columns to experiments. Larger values are assumed to be indicative of stronger evidence and reproducibility.} \item{init.par}{A 4-dimensional vector of the initial parameters where, \code{init.par[1]} is the mixture proportion of spurious signals, \code{init.par[2]} is the mean, \code{init.par[3]} is the standard deviation, \code{init.par[4]} is the correlation.} \item{method}{A character vector of length \eqn{1}{1}. The optimization method used. Should be either \code{"NM"}, \code{"SANN"}, \code{"L-BFGS"}, \code{"L-BFGS-B"}, or \code{"PEM"} which are abbreviations of Nelder-Mead, Simulated Annealing, limited-memory quasi-Newton method, limited-memory quasi-Newton method with box constraints, and the pseudo EM algorithm, respectively. Default is \code{"NM"}. See \code{\link{optim}} for further details.} \item{max.ite}{The maximum number of iterations. If the \code{method} is \code{"SANN"} this is the number of iterations as there is no other stopping criterion. (See \code{\link{optim}})} \item{verbose}{Logical. If \code{TRUE}, the log-likelihood values are printed.} \item{positive.rho}{\code{logical}. If \code{TRUE}, the correlation parameter is restricted to be positive.} \item{trace.theta}{\code{logical}. 
Extra convergence information is appended as a list to the output returned if \code{TRUE}. The exact behavior is dependent on the value of \code{method}. If \code{method} equals \code{"PEM"}, the argument is passed to \code{trace.theta} in \code{\link{PseudoEMAlgorithm}}. Otherwise it is passed to the control argument \code{trace} in \code{\link{optim}}.} \item{\dots}{Arguments passed to the \code{control}-list in \code{\link{optim}} or \code{\link{PseudoEMAlgorithm}} if \code{method} is \code{"PEM"}.} } \value{ A vector \code{par} of length 4 of the fitted parameters where \code{par[1]} is the probability of being from the first (or null) component, \code{par[2]} is the mean, \code{par[3]} is the standard deviation, and \code{par[4]} is the correlation. If \code{trace.theta} is \code{TRUE}, then a \code{list} is returned where the first entry is as described above and the second entry is the trace information (dependent of \code{method}.). } \description{ This function estimates the parameters of the special restricted Gaussian mixture copula model (GMCM) proposed by Li et. al. (2011). It is used to perform reproducibility (or meta) analysis using GMCMs. It features various optimization routines to identify the maximum likelihood estimate of the special GMCMs. } \details{ The \code{"L-BFGS-B"} method does not perform a transformation of the parameters. \code{fit.special.GMCM} is simply an alias of \code{fit.meta.gmcm}. } \note{ Simulated annealing is strongly dependent on the initial values and the cooling scheme. See \code{\link{optim}} for further details. 
} \examples{ set.seed(1) # True parameters true.par <- c(0.9, 2, 0.7, 0.6) # Simulation of data from the GMCM model data <- SimulateGMCMData(n = 1000, par = true.par) uhat <- Uhat(data$u) # Ranked observed data init.par <- c(0.5, 1, 0.5, 0.9) # Initial parameters # Optimization with Nelder-Mead nm.par <- fit.meta.GMCM(uhat, init.par = init.par, method = "NM") \dontrun{ # Comparison with other optimization methods # Optimization with simulated annealing sann.par <- fit.meta.GMCM(uhat, init.par = init.par, method = "SANN", max.ite = 3000, temp = 1) # Optimization with the Pseudo EM algorithm pem.par <- fit.meta.GMCM(uhat, init.par = init.par, method = "PEM") # The estimates agree nicely rbind("True" = true.par, "Start" = init.par, "NM" = nm.par, "SANN" = sann.par, "PEM" = pem.par) } # Get estimated cluster Khat <- get.IDR(x = uhat, par = nm.par)$Khat plot(uhat, col = Khat, main = "Clustering\nIDR < 0.05") } \references{ Li, Q., Brown, J. B. J. B., Huang, H., & Bickel, P. J. (2011). Measuring reproducibility of high-throughput experiments. The Annals of Applied Statistics, 5(3), 1752-1779. doi:10.1214/11-AOAS466 } \seealso{ \code{\link{optim}} } \author{ Anders Ellern Bilgrau <anders.ellern.bilgrau@gmail.com> }
# ARIMAX code
#
# Fits an ARIMA(5,2,2) model with external regressors (ARIMAX) to Wednesday
# RPM data, runs outlier and stationarity diagnostics, and forecasts the
# next 15 Wednesdays.
# (The original file began with the bare words "Arimax Code", which is a
# syntax error when the script is sourced; it is now a comment.)

# Load required packages
library(tseries)
library(TTR)
library(TSA)
library(forecast)
library(lubridate)
library(tsoutliers)
library(expsmooth)
library(fma)

# Getting required data (base data of RPM for Wednesday)
arima_data_Wednesday <- read.csv("D:/Projects/Arimax_All_Days_Model/RPM_all_days/Wednesday_RPM.csv",
                                 stringsAsFactors = FALSE)

# Filtering for the required data based on time period
arima_data_Wednesday <- arima_data_Wednesday[18:226, ]

# Assigning column names
names(arima_data_Wednesday) <- c("Date", "RPM")

# Obtaining RPM column
arima_RPM <- arima_data_Wednesday[, 2]

# Converting into a time series
arima_data_timeseries <- ts(arima_RPM)
names(arima_data_timeseries) <- c("RPM")

# Identifying Additive Outliers (AO) and Temporary Changes (TC)
outlier.arima_data <- tsoutliers::tso(arima_data_timeseries, types = c("AO", "TC"))
outlier.arima_data
plot(outlier.arima_data)
par(mfrow = c(2, 1))
plot(outlier.arima_data)

# Plotting time series, ACF and PACF plots
par(mfrow = c(2, 2))
plot.ts(arima_data_timeseries, main = "Time Series plot of Wednesday RPM")
# plot(arima_data_wednesday, main = "RPM of Wednesday trend", type = "l")
acf(arima_data_timeseries)
pacf(arima_data_timeseries)

# Stationarity tests: augmented Dickey-Fuller, Phillips-Perron, KPSS
adf.test(arima_data_timeseries, alternative = "stationary", k = 0)
pp.test(arima_data_timeseries)
kpss.test(arima_data_timeseries, "Trend")

# Regressor dataset
seasonal_reg <- read.csv("D:/Projects/Arimax_All_Days_Model/Regressor_all_days/Wednesday_Regressor.csv",
                         stringsAsFactors = FALSE)

# Filtering regressor dataset for the training time period
xreg <- seasonal_reg[44:252, -c(1)]

# Running the ARIMA model with external regressors on the dataset
dataarima_2 <- arima(arima_data_timeseries, order = c(5, 2, 2), xreg = xreg)

# Filtering regressor dataset for the model validation time period
newxreg <- seasonal_reg[253:267, ]

# Predicting RPM for the next 15 Wednesdays
dataforecasts <- predict(dataarima_2, n.ahead = 15, newxreg = newxreg[, -c(1)])
dataforecasts

# Writing forecasts and residuals onto CSV files
write.table(dataforecasts, file = "D:\\Projects\\ARIMAX\\Trial.csv", sep = ",")
write.table(dataarima_2$residuals, file = "D:\\Projects\\ARIMAX\\Residuals.csv", sep = ",")

# Residual diagnostic plots
par(mfrow = c(2, 1))
acf(x = dataarima_2$residuals)
pacf(x = dataarima_2$residuals, drop.lag.0 = FALSE)

# NOTE(review): auto.arima() expects a univariate time series, not a fitted
# model object, so auto.arima(dataarima_2) could not work; passing the
# series instead -- confirm this matches the intended comparison.
auto.arima(arima_data_timeseries)
/RPM_Arimax.R
no_license
Deepak2094/Time-Series-ARIMAX
R
false
false
2,296
r
# ARIMAX code
#
# Fits an ARIMA(5,2,2) model with external regressors (ARIMAX) to Wednesday
# RPM data, runs outlier and stationarity diagnostics, and forecasts the
# next 15 Wednesdays.
# (The original file began with the bare words "Arimax Code", which is a
# syntax error when the script is sourced; it is now a comment.)

# Load required packages
library(tseries)
library(TTR)
library(TSA)
library(forecast)
library(lubridate)
library(tsoutliers)
library(expsmooth)
library(fma)

# Getting required data (base data of RPM for Wednesday)
arima_data_Wednesday <- read.csv("D:/Projects/Arimax_All_Days_Model/RPM_all_days/Wednesday_RPM.csv",
                                 stringsAsFactors = FALSE)

# Filtering for the required data based on time period
arima_data_Wednesday <- arima_data_Wednesday[18:226, ]

# Assigning column names
names(arima_data_Wednesday) <- c("Date", "RPM")

# Obtaining RPM column
arima_RPM <- arima_data_Wednesday[, 2]

# Converting into a time series
arima_data_timeseries <- ts(arima_RPM)
names(arima_data_timeseries) <- c("RPM")

# Identifying Additive Outliers (AO) and Temporary Changes (TC)
outlier.arima_data <- tsoutliers::tso(arima_data_timeseries, types = c("AO", "TC"))
outlier.arima_data
plot(outlier.arima_data)
par(mfrow = c(2, 1))
plot(outlier.arima_data)

# Plotting time series, ACF and PACF plots
par(mfrow = c(2, 2))
plot.ts(arima_data_timeseries, main = "Time Series plot of Wednesday RPM")
# plot(arima_data_wednesday, main = "RPM of Wednesday trend", type = "l")
acf(arima_data_timeseries)
pacf(arima_data_timeseries)

# Stationarity tests: augmented Dickey-Fuller, Phillips-Perron, KPSS
adf.test(arima_data_timeseries, alternative = "stationary", k = 0)
pp.test(arima_data_timeseries)
kpss.test(arima_data_timeseries, "Trend")

# Regressor dataset
seasonal_reg <- read.csv("D:/Projects/Arimax_All_Days_Model/Regressor_all_days/Wednesday_Regressor.csv",
                         stringsAsFactors = FALSE)

# Filtering regressor dataset for the training time period
xreg <- seasonal_reg[44:252, -c(1)]

# Running the ARIMA model with external regressors on the dataset
dataarima_2 <- arima(arima_data_timeseries, order = c(5, 2, 2), xreg = xreg)

# Filtering regressor dataset for the model validation time period
newxreg <- seasonal_reg[253:267, ]

# Predicting RPM for the next 15 Wednesdays
dataforecasts <- predict(dataarima_2, n.ahead = 15, newxreg = newxreg[, -c(1)])
dataforecasts

# Writing forecasts and residuals onto CSV files
write.table(dataforecasts, file = "D:\\Projects\\ARIMAX\\Trial.csv", sep = ",")
write.table(dataarima_2$residuals, file = "D:\\Projects\\ARIMAX\\Residuals.csv", sep = ",")

# Residual diagnostic plots
par(mfrow = c(2, 1))
acf(x = dataarima_2$residuals)
pacf(x = dataarima_2$residuals, drop.lag.0 = FALSE)

# NOTE(review): auto.arima() expects a univariate time series, not a fitted
# model object, so auto.arima(dataarima_2) could not work; passing the
# series instead -- confirm this matches the intended comparison.
auto.arima(arima_data_timeseries)
if (interactive()) source(rprojroot::find_package_root_file("tests/testlive/helper-globals_live.R")) account = local_create_account() withr::defer(account$authentication$logout()) # cannot be in local_create_account with covr p_api = ProjectManagementApi$new(account$apiClient) f_api = FormsAndSubmissionsApi$new(account$apiClient) project_id = create_test_project(account, p_api)$id withr::defer(delete_user_with_email(account, new_email)) # To be initialized with <<- actor_id = NULL role_id = NULL test_that('Can create new user',{ body = list(email = new_email, password = new_password) user_cols = c('id', 'type', 'displayName', 'createdAt', 'updatedAt', 'email') # Delete user if it exists delete_user_with_email(account, new_email) # Create new user content = account$create_new_user(body = body)$content expect_s3_class(content, 'data.frame') expect_equal(names(content), user_cols) actor_id <<- content$id }) test_that('Can create published form',{ filename = file.path(testdata_directory, 'xlsx', 'repeat.xlsx') stopifnot(file.exists(filename)) content = f_api$create_form(project_id, filename, x_xls_form_form_id_fallback = "repeat", publish = TRUE, ignore_warnings = TRUE)$content expect_s3_class(content, 'data.frame') expect_equal(names(content), c('version', 'hash', 'sha', 'sha256', 'keyId', 'draftToken', 'enketoId', 'publishedAt', 'projectId', 'xmlFormId', 'state', 'name', 'enketoOnceId', 'createdAt', 'updatedAt')) }) test_that('Can get roles',{ content = account$list_roles()$content expect_s3_class(content, 'data.frame') expect_equal(names(content), c('id', 'name', 'system', 'createdAt', 'updatedAt', 'verbs')) role_id <<- content[content$system == "manager", "id"] }) test_that('Can get role details', { content = account$get_role_details(role_id)$content expect_s3_class(content, 'data.frame') expect_equal(names(content), c('id', 'name', 'system', 'createdAt', 'updatedAt', 'verbs')) expect_equal(content$id[1], role_id) }) test_that('Can get authenticated user details', 
{ content = account$get_authenticated_user_details()$content expect_s3_class(content, 'data.frame') expect_equal(names(content), c('id', 'type', 'displayName', 'createdAt', 'updatedAt', 'email')) expect_equal(content$displayName, email) expect_equal(content$email, email) }) test_that('Can get user details', { content = account$get_user_details(actor_id)$content expect_s3_class(content, 'data.frame') expect_equal(names(content), c('id', 'type', 'displayName', 'createdAt', 'updatedAt', 'email')) expect_equal(content$displayName, new_email) expect_equal(content$email, new_email) }) test_that('Can assign actor to form role',{ content = f_api$assign_actor_to_form_role(project_id, "repeat", role_id, actor_id)$content expect_s3_class(content, "data.frame") expect_true(content$success) }) test_that('Can list all assignments',{ content = account$list_assignments()$content expect_s3_class(content, 'data.frame') expect_setequal(names(content), c('roleId', 'actorId')) }) test_that('Can list all form assignments in project',{ content = p_api$list_form_assignments_in_project(project_id)$content expect_s3_class(content, 'data.frame') expect_equal(names(content), c('xmlFormId', 'roleId', 'actorId')) expect_equal(unique(content$xmlFormId), "repeat") }) test_that('Can list role specific assignments in project',{ content = p_api$list_role_specific_form_assignments_in_project(project_id, "manager")$content expect_s3_class(content, 'data.frame') expect_equal(names(content), c('xmlFormId', 'roleId', 'actorId')) })
/tests/testlive/test-FormRoles.R
no_license
dmenne/odkapi
R
false
false
3,712
r
if (interactive()) source(rprojroot::find_package_root_file("tests/testlive/helper-globals_live.R")) account = local_create_account() withr::defer(account$authentication$logout()) # cannot be in local_create_account with covr p_api = ProjectManagementApi$new(account$apiClient) f_api = FormsAndSubmissionsApi$new(account$apiClient) project_id = create_test_project(account, p_api)$id withr::defer(delete_user_with_email(account, new_email)) # To be initialized with <<- actor_id = NULL role_id = NULL test_that('Can create new user',{ body = list(email = new_email, password = new_password) user_cols = c('id', 'type', 'displayName', 'createdAt', 'updatedAt', 'email') # Delete user if it exists delete_user_with_email(account, new_email) # Create new user content = account$create_new_user(body = body)$content expect_s3_class(content, 'data.frame') expect_equal(names(content), user_cols) actor_id <<- content$id }) test_that('Can create published form',{ filename = file.path(testdata_directory, 'xlsx', 'repeat.xlsx') stopifnot(file.exists(filename)) content = f_api$create_form(project_id, filename, x_xls_form_form_id_fallback = "repeat", publish = TRUE, ignore_warnings = TRUE)$content expect_s3_class(content, 'data.frame') expect_equal(names(content), c('version', 'hash', 'sha', 'sha256', 'keyId', 'draftToken', 'enketoId', 'publishedAt', 'projectId', 'xmlFormId', 'state', 'name', 'enketoOnceId', 'createdAt', 'updatedAt')) }) test_that('Can get roles',{ content = account$list_roles()$content expect_s3_class(content, 'data.frame') expect_equal(names(content), c('id', 'name', 'system', 'createdAt', 'updatedAt', 'verbs')) role_id <<- content[content$system == "manager", "id"] }) test_that('Can get role details', { content = account$get_role_details(role_id)$content expect_s3_class(content, 'data.frame') expect_equal(names(content), c('id', 'name', 'system', 'createdAt', 'updatedAt', 'verbs')) expect_equal(content$id[1], role_id) }) test_that('Can get authenticated user details', 
{ content = account$get_authenticated_user_details()$content expect_s3_class(content, 'data.frame') expect_equal(names(content), c('id', 'type', 'displayName', 'createdAt', 'updatedAt', 'email')) expect_equal(content$displayName, email) expect_equal(content$email, email) }) test_that('Can get user details', { content = account$get_user_details(actor_id)$content expect_s3_class(content, 'data.frame') expect_equal(names(content), c('id', 'type', 'displayName', 'createdAt', 'updatedAt', 'email')) expect_equal(content$displayName, new_email) expect_equal(content$email, new_email) }) test_that('Can assign actor to form role',{ content = f_api$assign_actor_to_form_role(project_id, "repeat", role_id, actor_id)$content expect_s3_class(content, "data.frame") expect_true(content$success) }) test_that('Can list all assignments',{ content = account$list_assignments()$content expect_s3_class(content, 'data.frame') expect_setequal(names(content), c('roleId', 'actorId')) }) test_that('Can list all form assignments in project',{ content = p_api$list_form_assignments_in_project(project_id)$content expect_s3_class(content, 'data.frame') expect_equal(names(content), c('xmlFormId', 'roleId', 'actorId')) expect_equal(unique(content$xmlFormId), "repeat") }) test_that('Can list role specific assignments in project',{ content = p_api$list_role_specific_form_assignments_in_project(project_id, "manager")$content expect_s3_class(content, 'data.frame') expect_equal(names(content), c('xmlFormId', 'roleId', 'actorId')) })
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/DiversityCurves.R \name{DiversityCurves} \alias{DiversityCurves} \alias{phyloDiv} \alias{taxicDivCont} \alias{taxicDivDisc} \title{Diversity Curves} \usage{ taxicDivCont(timeData, int.length = 1, int.times = NULL, plot = TRUE, plotLogRich = FALSE, timelims = NULL, drop.cryptic = FALSE) taxicDivDisc(timeList, int.times = NULL, drop.singletons = FALSE, plot = TRUE, plotLogRich = FALSE, timelims = NULL, extant.adjust = 0.001, split.int = TRUE) phyloDiv(tree, int.length = 0.1, int.times = NULL, plot = TRUE, plotLogRich = FALSE, drop.ZLB = TRUE, timelims = NULL) } \arguments{ \item{timeData}{Two-column matrix giving the per-taxon first and last appearances in absolute time. The simulated data tables output by \code{fossilRecord2fossilTaxa} following simulation with \code{simFossilRecord} can also be supplied to \code{taxicDivCont}.} \item{int.length}{The length of intervals used to make the diversity curve. Ignored if int.times is given.} \item{int.times}{An optional two-column matrix of the interval start and end times for calculating the diversity curve. If NULL, calculated internally. If given, the argument split.int and int.length are ignored.} \item{plot}{If TRUE, a diversity curve generated from the data is plotted.} \item{plotLogRich}{If TRUE, taxic diversity is plotted on log scale.} \item{timelims}{Limits for the x (time) axis for diversity curve plots. Only affects plotting. Given as either NULL (the default) or as a vector of length two as for 'xlim' in the basic R function plot. Time axes will be plotted \emph{exactly} to these values.} \item{drop.cryptic}{If TRUE, cryptic taxa are merged to form one taxon for estimating taxon curves. Only works for objects from \code{simFossilRecord} via \code{fossilRecord2fossilTaxa}.} \item{timeList}{A list composed of two matrices, giving interval start and end dates and taxon first and last occurrences within those intervals. 
See details.} \item{drop.singletons}{If TRUE, taxa confined to a single interval will be dropped prior to the diversity curve calculation. This is sometimes done if single intervals have overly high diversities due to the 'monograph' effect where more named taxa are known in certain intervals largely due to taxonomic expert effort and not real changes in historical biotic diversity.} \item{extant.adjust}{Amount of time to be added to extend start time for (0,0) bins for extant taxa, so that the that 'time interval' doesn't appear to have an infinitely small width.} \item{split.int}{For discrete time data, should calculated/input intervals be split at discrete time interval boundaries? If FALSE, can create apparent artifacts in calculating the diversity curve. See below.} \item{tree}{A time-scaled phylogeny of class phylo.} \item{drop.ZLB}{If true, zero-length terminal branches are dropped from the input tree for phylogenetic datasets, before calculating standing diversity.} } \value{ These functions will invisibly return a three-column matrix, where the first two columns are interval start and end times and the third column is the number of taxa/lineages counted in that interval. } \description{ Functions to plot diversity curves based on taxic range data, in both discrete and continuous time, and for phylogenies. } \details{ First, some background. Diversity curves are plots of species/taxon/lineage richness over time for a particular group of organisms. For paleontological studies, these are generally based on per-taxon range data while more recently in evolutionary biology, molecular phylogenies have been used to calculate lineage-through-time plots (LTTs). Neither of these approaches are without their particular weaknesses; reconstructing the true history of biodiversity is a difficult task no matter what data is available. 
The diversity curves produced by these functions will always measure diversity within binned time intervals (and plot them as rectangular bins). For continuous-time data or phylogenies, one could decrease the int.length used to get what is essentially an 'instantaneous' estimate of diversity. This is warned against, however, as most historical diversity data will have some time-averaging or uncertain temporal resolution and thus is probably not finely-resolved enough to calculate instantaneous estimates of diversity. As with many functions in the paleotree library, absolute time is always decreasing, i.e. the present day is zero. As diversity is counted within binned intervals, the true standing diversity may be somewhat lower than the measured (observed) quantity, particularly if intervals are longer than the mean duration of taxa is used. This will be an issue with all diversity curve functions, but particularly the discrete-time variant. For diversity data in particularly large discrete time intervals, plotting this data in smaller bins which do not line up completely with the original intervals will create a 'spiky' diversity curve, as these smaller intersecting bins will have a large number of taxa which may have been present in either of the neighboring intervals. This will give these small bins an apparently high estimated standing diversity. This artifact is avoided with the default setting split.int=TRUE, which will split any input or calculated intervals so that they start and end at the boundaries of the discrete-time range bins. The timeList object should be a list composed of two matrices, the first matrix giving by-interval start and end times (in absolute time), the second matrix giving the by-taxon first and last appearances in the intervals defined in the first matrix, numbered as the rows. Absolute time should be decreasing, while the intervals should be numbered so that the number increases with time. 
Taxa alive in the modern should be listed as last occurring in a time interval that begins at time 0 and ends at time 0. See the documentation for the time-scaling function \code{\link{bin_timePaleoPhy}} and the simulation function \code{\link{binTimeData}} for more information on formatting. Unlike some paleotree functions, such as perCapitaRates, the intervals can be overlapping or of unequal length. The diversity curve functions deal with such issues by assuming taxa occur from the base of the interval they are first found in until the end of the last interval they are occur in. Taxa in wide-ranging intervals that contain many others will be treated as occurring in all nested intervals. phyloDiv will resolve polytomies to be dichotomous nodes separated by zero-length branches prior to calculating the diversity curve. There is no option to alter this behavior, but it should not affect the use of the function because the addition of the zero-length branches should produce an identical diversity history as a polytomy. phyloDiv will also drop zero-length terminal branches, as with the function dropZLB. This the default behavior for the function but can be turned off by setting the argument drop.zlb to FALSE. 
} \examples{ #taxicDivDisc with the retiolinae dataset data(retiolitinae) taxicDivDisc(retioRanges) #simulation examples set.seed(444) record<-simFossilRecord(p=0.1, q=0.1, nruns=1, nTotalTaxa=c(30,40), nExtant=0) taxa<-fossilRecord2fossilTaxa(record) #let's see what the 'true' diversity curve looks like in this case #plot the FADs and LADs with taxicDivCont() taxicDivCont(taxa) #simulate a fossil record with imperfect sampling with sampleRanges rangesCont <- sampleRanges(taxa,r=0.5) #plot the diversity curve based on the sampled ranges layout(1:2) taxicDivCont(rangesCont) #Now let's use binTimeData to bin in intervals of 1 time unit rangesDisc <- binTimeData(rangesCont,int.length=1) #plot with taxicDivDisc taxicDivDisc(rangesDisc) #compare to the continuous time diversity curve layout(1) #Now let's make a tree using taxa2phylo tree <- taxa2phylo(taxa,obs_time=rangesCont[,2]) phyloDiv(tree) #a simple example with phyloDiv #using a tree from rtree in ape set.seed(444) tree <- rtree(100) phyloDiv(tree) #a neat example of using phyDiv with timeSliceTree #to simulate doing molecular-phylogeny studies #of diversification...in the past set.seed(444) record<-simFossilRecord(p=0.1, q=0.1, nruns=1, nTotalTaxa=c(30,40), nExtant=0) taxa<-fossilRecord2fossilTaxa(record) taxicDivCont(taxa) #that's the whole diversity curve #with timeSliceTree we could look at the lineage accumulation curve #we'd get of species sampled at a point in time tree <- taxa2phylo(taxa) #use timeSliceTree to make tree of relationships up until time=950 tree950 <- timeSliceTree(tree,sliceTime=950,plot=TRUE,drop.extinct=FALSE) #use drop.extinct=T to only get the tree of lineages extant at time=950 tree950 <- timeSliceTree(tree,sliceTime=950,plot=TRUE,drop.extinct=TRUE) #now its an ultrametric tree with many fewer tips... 
#lets plot the lineage accumulation plot on a log scale phyloDiv(tree950,plotLogRich=TRUE) #an example of a 'spiky' diversity curve and why split.int is a good thing set.seed(444) record<-simFossilRecord(p=0.1, q=0.1, nruns=1, nTotalTaxa=c(30,40), nExtant=0) taxa<-fossilRecord2fossilTaxa(record) taxaDiv <- taxicDivCont(taxa) #simulate a fossil record with imperfect sampling with sampleRanges() rangesCont <- sampleRanges(taxa,r=0.5) rangesDisc <- binTimeData(rangesCont,int.length=10) #now let's plot with taxicDivDisc() but with the intervals from taxaDiv #by default, split.int=TRUE taxicDivDisc(rangesDisc,int.times=taxaDiv[,1:2],split.int=TRUE) #look pretty #now let's turn off split.int taxicDivDisc(rangesDisc,int.times=taxaDiv[,1:2],split.int=FALSE) #looks 'spiky'! } \author{ David W. Bapst } \seealso{ \code{\link{multiDiv}}, \code{\link{timeSliceTree}}, \code{\link{binTimeData}} There are several different functions for traditional LTT plots (phylogenetic diversity curves), such as the function ,\code{\link{ltt.plot}} in the package ape, the function \code{ltt} in the package phytools, the function \code{plotLtt} in the package laser and the function \code{LTT.average.root} in the package TreeSim. }
/man/DiversityCurves.Rd
permissive
KlausVigo/paleotree
R
false
true
10,110
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/DiversityCurves.R \name{DiversityCurves} \alias{DiversityCurves} \alias{phyloDiv} \alias{taxicDivCont} \alias{taxicDivDisc} \title{Diversity Curves} \usage{ taxicDivCont(timeData, int.length = 1, int.times = NULL, plot = TRUE, plotLogRich = FALSE, timelims = NULL, drop.cryptic = FALSE) taxicDivDisc(timeList, int.times = NULL, drop.singletons = FALSE, plot = TRUE, plotLogRich = FALSE, timelims = NULL, extant.adjust = 0.001, split.int = TRUE) phyloDiv(tree, int.length = 0.1, int.times = NULL, plot = TRUE, plotLogRich = FALSE, drop.ZLB = TRUE, timelims = NULL) } \arguments{ \item{timeData}{Two-column matrix giving the per-taxon first and last appearances in absolute time. The simulated data tables output by \code{fossilRecord2fossilTaxa} following simulation with \code{simFossilRecord} can also be supplied to \code{taxicDivCont}.} \item{int.length}{The length of intervals used to make the diversity curve. Ignored if int.times is given.} \item{int.times}{An optional two-column matrix of the interval start and end times for calculating the diversity curve. If NULL, calculated internally. If given, the argument split.int and int.length are ignored.} \item{plot}{If TRUE, a diversity curve generated from the data is plotted.} \item{plotLogRich}{If TRUE, taxic diversity is plotted on log scale.} \item{timelims}{Limits for the x (time) axis for diversity curve plots. Only affects plotting. Given as either NULL (the default) or as a vector of length two as for 'xlim' in the basic R function plot. Time axes will be plotted \emph{exactly} to these values.} \item{drop.cryptic}{If TRUE, cryptic taxa are merged to form one taxon for estimating taxon curves. Only works for objects from \code{simFossilRecord} via \code{fossilRecord2fossilTaxa}.} \item{timeList}{A list composed of two matrices, giving interval start and end dates and taxon first and last occurrences within those intervals. 
See details.} \item{drop.singletons}{If TRUE, taxa confined to a single interval will be dropped prior to the diversity curve calculation. This is sometimes done if single intervals have overly high diversities due to the 'monograph' effect where more named taxa are known in certain intervals largely due to taxonomic expert effort and not real changes in historical biotic diversity.} \item{extant.adjust}{Amount of time to be added to extend start time for (0,0) bins for extant taxa, so that the that 'time interval' doesn't appear to have an infinitely small width.} \item{split.int}{For discrete time data, should calculated/input intervals be split at discrete time interval boundaries? If FALSE, can create apparent artifacts in calculating the diversity curve. See below.} \item{tree}{A time-scaled phylogeny of class phylo.} \item{drop.ZLB}{If true, zero-length terminal branches are dropped from the input tree for phylogenetic datasets, before calculating standing diversity.} } \value{ These functions will invisibly return a three-column matrix, where the first two columns are interval start and end times and the third column is the number of taxa/lineages counted in that interval. } \description{ Functions to plot diversity curves based on taxic range data, in both discrete and continuous time, and for phylogenies. } \details{ First, some background. Diversity curves are plots of species/taxon/lineage richness over time for a particular group of organisms. For paleontological studies, these are generally based on per-taxon range data while more recently in evolutionary biology, molecular phylogenies have been used to calculate lineage-through-time plots (LTTs). Neither of these approaches are without their particular weaknesses; reconstructing the true history of biodiversity is a difficult task no matter what data is available. 
The diversity curves produced by these functions will always measure diversity within binned time intervals (and plot them as rectangular bins). For continuous-time data or phylogenies, one could decrease the int.length used to get what is essentially an 'instantaneous' estimate of diversity. This is warned against, however, as most historical diversity data will have some time-averaging or uncertain temporal resolution and thus is probably not finely-resolved enough to calculate instantaneous estimates of diversity. As with many functions in the paleotree library, absolute time is always decreasing, i.e. the present day is zero. As diversity is counted within binned intervals, the true standing diversity may be somewhat lower than the measured (observed) quantity, particularly if intervals are longer than the mean duration of taxa is used. This will be an issue with all diversity curve functions, but particularly the discrete-time variant. For diversity data in particularly large discrete time intervals, plotting this data in smaller bins which do not line up completely with the original intervals will create a 'spiky' diversity curve, as these smaller intersecting bins will have a large number of taxa which may have been present in either of the neighboring intervals. This will give these small bins an apparently high estimated standing diversity. This artifact is avoided with the default setting split.int=TRUE, which will split any input or calculated intervals so that they start and end at the boundaries of the discrete-time range bins. The timeList object should be a list composed of two matrices, the first matrix giving by-interval start and end times (in absolute time), the second matrix giving the by-taxon first and last appearances in the intervals defined in the first matrix, numbered as the rows. Absolute time should be decreasing, while the intervals should be numbered so that the number increases with time. 
Taxa alive in the modern should be listed as last occurring in a time interval that begins at time 0 and ends at time 0. See the documentation for the time-scaling function \code{\link{bin_timePaleoPhy}} and the simulation function \code{\link{binTimeData}} for more information on formatting. Unlike some paleotree functions, such as perCapitaRates, the intervals can be overlapping or of unequal length. The diversity curve functions deal with such issues by assuming taxa occur from the base of the interval they are first found in until the end of the last interval they are occur in. Taxa in wide-ranging intervals that contain many others will be treated as occurring in all nested intervals. phyloDiv will resolve polytomies to be dichotomous nodes separated by zero-length branches prior to calculating the diversity curve. There is no option to alter this behavior, but it should not affect the use of the function because the addition of the zero-length branches should produce an identical diversity history as a polytomy. phyloDiv will also drop zero-length terminal branches, as with the function dropZLB. This the default behavior for the function but can be turned off by setting the argument drop.zlb to FALSE. 
} \examples{ #taxicDivDisc with the retiolinae dataset data(retiolitinae) taxicDivDisc(retioRanges) #simulation examples set.seed(444) record<-simFossilRecord(p=0.1, q=0.1, nruns=1, nTotalTaxa=c(30,40), nExtant=0) taxa<-fossilRecord2fossilTaxa(record) #let's see what the 'true' diversity curve looks like in this case #plot the FADs and LADs with taxicDivCont() taxicDivCont(taxa) #simulate a fossil record with imperfect sampling with sampleRanges rangesCont <- sampleRanges(taxa,r=0.5) #plot the diversity curve based on the sampled ranges layout(1:2) taxicDivCont(rangesCont) #Now let's use binTimeData to bin in intervals of 1 time unit rangesDisc <- binTimeData(rangesCont,int.length=1) #plot with taxicDivDisc taxicDivDisc(rangesDisc) #compare to the continuous time diversity curve layout(1) #Now let's make a tree using taxa2phylo tree <- taxa2phylo(taxa,obs_time=rangesCont[,2]) phyloDiv(tree) #a simple example with phyloDiv #using a tree from rtree in ape set.seed(444) tree <- rtree(100) phyloDiv(tree) #a neat example of using phyDiv with timeSliceTree #to simulate doing molecular-phylogeny studies #of diversification...in the past set.seed(444) record<-simFossilRecord(p=0.1, q=0.1, nruns=1, nTotalTaxa=c(30,40), nExtant=0) taxa<-fossilRecord2fossilTaxa(record) taxicDivCont(taxa) #that's the whole diversity curve #with timeSliceTree we could look at the lineage accumulation curve #we'd get of species sampled at a point in time tree <- taxa2phylo(taxa) #use timeSliceTree to make tree of relationships up until time=950 tree950 <- timeSliceTree(tree,sliceTime=950,plot=TRUE,drop.extinct=FALSE) #use drop.extinct=T to only get the tree of lineages extant at time=950 tree950 <- timeSliceTree(tree,sliceTime=950,plot=TRUE,drop.extinct=TRUE) #now its an ultrametric tree with many fewer tips... 
#lets plot the lineage accumulation plot on a log scale phyloDiv(tree950,plotLogRich=TRUE) #an example of a 'spiky' diversity curve and why split.int is a good thing set.seed(444) record<-simFossilRecord(p=0.1, q=0.1, nruns=1, nTotalTaxa=c(30,40), nExtant=0) taxa<-fossilRecord2fossilTaxa(record) taxaDiv <- taxicDivCont(taxa) #simulate a fossil record with imperfect sampling with sampleRanges() rangesCont <- sampleRanges(taxa,r=0.5) rangesDisc <- binTimeData(rangesCont,int.length=10) #now let's plot with taxicDivDisc() but with the intervals from taxaDiv #by default, split.int=TRUE taxicDivDisc(rangesDisc,int.times=taxaDiv[,1:2],split.int=TRUE) #look pretty #now let's turn off split.int taxicDivDisc(rangesDisc,int.times=taxaDiv[,1:2],split.int=FALSE) #looks 'spiky'! } \author{ David W. Bapst } \seealso{ \code{\link{multiDiv}}, \code{\link{timeSliceTree}}, \code{\link{binTimeData}} There are several different functions for traditional LTT plots (phylogenetic diversity curves), such as the function ,\code{\link{ltt.plot}} in the package ape, the function \code{ltt} in the package phytools, the function \code{plotLtt} in the package laser and the function \code{LTT.average.root} in the package TreeSim. }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cn_assess.R \name{pool_auPRs} \alias{pool_auPRs} \title{pool auprs} \usage{ pool_auPRs(perfDF, rng = seq(-4, 10, by = 1)) } \arguments{ \item{perfDF}{perfDF} \item{rng}{seq -4 to 10} } \value{ df } \description{ pool auprs }
/man/pool_auPRs.Rd
permissive
edroaldo/CellNet
R
false
true
305
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cn_assess.R \name{pool_auPRs} \alias{pool_auPRs} \title{pool auprs} \usage{ pool_auPRs(perfDF, rng = seq(-4, 10, by = 1)) } \arguments{ \item{perfDF}{perfDF} \item{rng}{seq -4 to 10} } \value{ df } \description{ pool auprs }
#!/usr/bin/env Rscript # Set the precision to 16 digits: options( digits = 16 ); #' Run benchmarks. #' #' @examples #' main(); main <- function() { # Define benchmark parameters: name <- "asinh"; iterations <- 1000000L; repeats <- 3; #' Print the TAP version. #' #' @examples #' print_version(); print_version <- function() { cat( "TAP version 13\n" ); } #' Print the TAP summary. #' #' @param total Total number of tests. #' @param passing Total number of passing tests. #' #' @examples #' print_summary( 3, 3 ); print_summary <- function( total, passing ) { cat( "#\n" ); cat( paste0( "1..", total, "\n" ) ); # TAP plan cat( paste0( "# total ", total, "\n" ) ); cat( paste0( "# pass ", passing, "\n" ) ); cat( "#\n" ); cat( "# ok\n" ); } #' Print benchmark results. #' #' @param iterations Number of iterations. #' @param elapsed Elapsed time in seconds. #' #' @examples #' print_results( 10000L, 0.131009101868 ); print_results <- function( iterations, elapsed ) { rate <- iterations / elapsed; cat( " ---\n" ); cat( paste0( " iterations: ", iterations, "\n" ) ); cat( paste0( " elapsed: ", elapsed, "\n" ) ); cat( paste0( " rate: ", rate, "\n" ) ); cat( " ...\n" ); } #' Run a benchmark. #' #' ## Notes #' #' * We compute and return a total "elapsed" time, rather than the minimum #' evaluation time, to match benchmark results in other languages (e.g., #' Python). #' #' #' @param iterations Number of Iterations. #' @return Elapsed time in seconds. 
#' #' @examples #' elapsed <- benchmark( 10000L ); benchmark <- function( iterations ) { # Run the benchmarks: results <- microbenchmark::microbenchmark( asinh( (200.0*runif(1)) - 100.0 ), times = iterations ); # Sum all the raw timing results to get a total "elapsed" time: elapsed <- sum( results$time ); # Convert the elapsed time from nanoseconds to seconds: elapsed <- elapsed / 1.0e9; return( elapsed ); } print_version(); for ( i in 1:repeats ) { cat( paste0( "# r::", name, "\n" ) ); elapsed <- benchmark( iterations ); print_results( iterations, elapsed ); cat( paste0( "ok ", i, " benchmark finished", "\n" ) ); } print_summary( repeats, repeats ); } main();
/lib/node_modules/@stdlib/math/base/special/asinh/benchmark/r/benchmark.R
permissive
UndefinedBehaviour/stdlib
R
false
false
2,268
r
#!/usr/bin/env Rscript # Set the precision to 16 digits: options( digits = 16 ); #' Run benchmarks. #' #' @examples #' main(); main <- function() { # Define benchmark parameters: name <- "asinh"; iterations <- 1000000L; repeats <- 3; #' Print the TAP version. #' #' @examples #' print_version(); print_version <- function() { cat( "TAP version 13\n" ); } #' Print the TAP summary. #' #' @param total Total number of tests. #' @param passing Total number of passing tests. #' #' @examples #' print_summary( 3, 3 ); print_summary <- function( total, passing ) { cat( "#\n" ); cat( paste0( "1..", total, "\n" ) ); # TAP plan cat( paste0( "# total ", total, "\n" ) ); cat( paste0( "# pass ", passing, "\n" ) ); cat( "#\n" ); cat( "# ok\n" ); } #' Print benchmark results. #' #' @param iterations Number of iterations. #' @param elapsed Elapsed time in seconds. #' #' @examples #' print_results( 10000L, 0.131009101868 ); print_results <- function( iterations, elapsed ) { rate <- iterations / elapsed; cat( " ---\n" ); cat( paste0( " iterations: ", iterations, "\n" ) ); cat( paste0( " elapsed: ", elapsed, "\n" ) ); cat( paste0( " rate: ", rate, "\n" ) ); cat( " ...\n" ); } #' Run a benchmark. #' #' ## Notes #' #' * We compute and return a total "elapsed" time, rather than the minimum #' evaluation time, to match benchmark results in other languages (e.g., #' Python). #' #' #' @param iterations Number of Iterations. #' @return Elapsed time in seconds. 
#' #' @examples #' elapsed <- benchmark( 10000L ); benchmark <- function( iterations ) { # Run the benchmarks: results <- microbenchmark::microbenchmark( asinh( (200.0*runif(1)) - 100.0 ), times = iterations ); # Sum all the raw timing results to get a total "elapsed" time: elapsed <- sum( results$time ); # Convert the elapsed time from nanoseconds to seconds: elapsed <- elapsed / 1.0e9; return( elapsed ); } print_version(); for ( i in 1:repeats ) { cat( paste0( "# r::", name, "\n" ) ); elapsed <- benchmark( iterations ); print_results( iterations, elapsed ); cat( paste0( "ok ", i, " benchmark finished", "\n" ) ); } print_summary( repeats, repeats ); } main();
if(!require(dplyr)) install.packages('dplyr') if(!require(MASS)) install.packages("MASS") if(!require(stringr)) install.packages("stringr") if(!require(ggplot2)) install.packages('ggplot2') if(!require(purrr)) install.packages("purrr") library(stringr) library(purrr) library(ggplot2) syn=amlresistancenetworks::synapseLogin() df = amlresistancenetworks::querySynapseTable("syn22172602") #filter for the samples that have the NRAS and FLT3 mutations with a Tumor VAF>0 newdf <- df[df$Tumor.VAF >0 & (df$Gene == "NRAS" | df$Gene =="FLT3") ,] AML = amlresistancenetworks::querySynapseTable("syn22288960") #create two new columns for NRAS and FLT3 in AML dataframe, with "YES" in column depicting sample has mutation for that gene and "NO" depicting no mutation for that gene AML$NRAS = NA AML$FLT3 = NA n = length(AML$net1) m = length(newdf$AML.sample) for(i in 1:n){ for(j in 1:m){ if(AML$net1[i] == newdf$AML.sample[j]){ if(newdf$Gene[j] == "NRAS"){ AML$NRAS[i] = "YES" AML$FLT3[i] = "NO" } if(newdf$Gene[j] == "FLT3"){ AML$FLT3[i] = "YES" AML$NRAS[i] = "NO" } } } } AML[is.na(AML)] = "NO" #filter for hyp1 and hyp2 to be the same, and filter for just communities hyp_type = AML %>% filter(AML$hyp1 == "proteomics" & AML$hyp2 == "proteomics") community = hyp_type %>% filter(hyp_type$net2_type == "community") community$category = rep('NRAS Mutation',nrow(community)) community$category[grep('NO',community$NRAS)]<-'No NRAS Mutation' #perform ttests for communities, based on grouping whether samples have mutation or not ttest_comm = function(x){ data_NRAS = community %>% filter(net2 == x) normal_NRAS = data_NRAS %>% filter(str_detect(NRAS,'NO')) mutation_NRAS = data_NRAS %>% filter(str_detect(NRAS,'YES')) model = t.test(mutation_NRAS$distance, normal_NRAS$distance, paired = FALSE) return (model) } community_list = unique(community$net2) #print out ttests for each community for (val in community_list){ print(paste("Community number", val)) model = ttest_comm(val) print(model) } 
pvals<-community_list%>% purrr::map(~ttest_comm(.)$p.value)%>%as.numeric()%>%unlist() #now adding larger data frame to plot res.df<-data.frame(Community=unlist(community_list),PValue=pvals) community <- community%>%rename(Community=net2)%>%left_join(res.df) ggplot(subset(community,PValue<0.3),aes(x=Community,y=distance,fill=category))+geom_boxplot()
/results/beatAMLpatientProfiling/tumor_NRAS_proteomics_hyphalnet.R
no_license
Lthura/amlresistancenetworks
R
false
false
2,452
r
if(!require(dplyr)) install.packages('dplyr') if(!require(MASS)) install.packages("MASS") if(!require(stringr)) install.packages("stringr") if(!require(ggplot2)) install.packages('ggplot2') if(!require(purrr)) install.packages("purrr") library(stringr) library(purrr) library(ggplot2) syn=amlresistancenetworks::synapseLogin() df = amlresistancenetworks::querySynapseTable("syn22172602") #filter for the samples that have the NRAS and FLT3 mutations with a Tumor VAF>0 newdf <- df[df$Tumor.VAF >0 & (df$Gene == "NRAS" | df$Gene =="FLT3") ,] AML = amlresistancenetworks::querySynapseTable("syn22288960") #create two new columns for NRAS and FLT3 in AML dataframe, with "YES" in column depicting sample has mutation for that gene and "NO" depicting no mutation for that gene AML$NRAS = NA AML$FLT3 = NA n = length(AML$net1) m = length(newdf$AML.sample) for(i in 1:n){ for(j in 1:m){ if(AML$net1[i] == newdf$AML.sample[j]){ if(newdf$Gene[j] == "NRAS"){ AML$NRAS[i] = "YES" AML$FLT3[i] = "NO" } if(newdf$Gene[j] == "FLT3"){ AML$FLT3[i] = "YES" AML$NRAS[i] = "NO" } } } } AML[is.na(AML)] = "NO" #filter for hyp1 and hyp2 to be the same, and filter for just communities hyp_type = AML %>% filter(AML$hyp1 == "proteomics" & AML$hyp2 == "proteomics") community = hyp_type %>% filter(hyp_type$net2_type == "community") community$category = rep('NRAS Mutation',nrow(community)) community$category[grep('NO',community$NRAS)]<-'No NRAS Mutation' #perform ttests for communities, based on grouping whether samples have mutation or not ttest_comm = function(x){ data_NRAS = community %>% filter(net2 == x) normal_NRAS = data_NRAS %>% filter(str_detect(NRAS,'NO')) mutation_NRAS = data_NRAS %>% filter(str_detect(NRAS,'YES')) model = t.test(mutation_NRAS$distance, normal_NRAS$distance, paired = FALSE) return (model) } community_list = unique(community$net2) #print out ttests for each community for (val in community_list){ print(paste("Community number", val)) model = ttest_comm(val) print(model) } 
pvals<-community_list%>% purrr::map(~ttest_comm(.)$p.value)%>%as.numeric()%>%unlist() #now adding larger data frame to plot res.df<-data.frame(Community=unlist(community_list),PValue=pvals) community <- community%>%rename(Community=net2)%>%left_join(res.df) ggplot(subset(community,PValue<0.3),aes(x=Community,y=distance,fill=category))+geom_boxplot()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pacf2phi.R \name{ar.pacf2phi} \alias{ar.pacf2phi} \title{Convert partial autocorrelation coefficients to autoregressive coeffients} \usage{ ar.pacf2phi(pac) } \arguments{ \item{pac}{partial autocorrelation coefficient (vector)} } \description{ Convert partial autocorrelation coefficients to autoregressive coeffients } \keyword{internal}
/man/ar.pacf2phi.Rd
no_license
pedrognicolau/ARbiascorrect-v1
R
false
true
417
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pacf2phi.R \name{ar.pacf2phi} \alias{ar.pacf2phi} \title{Convert partial autocorrelation coefficients to autoregressive coeffients} \usage{ ar.pacf2phi(pac) } \arguments{ \item{pac}{partial autocorrelation coefficient (vector)} } \description{ Convert partial autocorrelation coefficients to autoregressive coeffients } \keyword{internal}
# #dui zhongwen bu youhao. # zhiyao shi tongyi shiyan bianke. yieldAnalysis<-function(dd,area=667) { dd$chanliang<-chanliang(dd,area) #dd$biaozhunchanliang<-biaozhunchanliang(dd) fit<-aov(chanliang~code,data=dd) #summary(fit) library("agricolae") dt<-duncan.test(fit,"code",alpha=0.05) fcfx<-cbind(dt$means[order(dt$means$chanliang,decreasing=TRUE),],dt$groups$M) fcfx$CV<-fcfx$std/fcfx$chanliang*100 check=mean(dd$chanliang[dd$is_ck==1],na.rm=TRUE) fcfx$zc<-(fcfx$chanliang/check-1)*100 #names(fcfx) fcfx$chanliang<-round(fcfx$chanliang,2) fcfx$std<-round(fcfx$std,2) fcfx$Min<-round(fcfx$Min,2) fcfx$Max<-round(fcfx$Max,2) fcfx$CV<-round(fcfx$CV,2) fcfx$zc<-round(fcfx$zc,2) fcfx$px<-(1:nrow(fcfx)) names(fcfx)<-c("yield(kg)","std","rp","Mix(kg)","max(kg)","group(5%)","CV%","more(%)","order") return(fcfx) } # #dui zhongwen bu youhao,zhongwen ban. # pingshu<-function(dd){ dd$zengchan<-zengchan(dd) dd$biaozhunchanliang<-biaozhunchanliang(dd) dd1<-aggregate(dd,by=list(code=dd$code),mean,na.rm=TRUE) trtr<-NULL for (i in 1:nrow(dd1)){ trtr[i]<-paste( dd1[i,]$code, "(", dd1[i,]$laiyuan,")", #think of hr ",pingjun chanliang ",round(dd1[i,]$biaozhunchanliang,2),"kg." , "pingjun bi duizhao zengchan ",round(dd1[i,]$zengchan,2),"%." , "shengyuqi ",round(dd1[i,]$shengyuqi,0),"tian." , "danbaizhihanliang ",round(dd1[i,]$danbai,2),"%,", "zhifanghanliang ",round(dd1[i,]$zhifang,2),"%.", dd1[i,]$jiejiaxixing,"jiejiaxixing,", "zhugao ",round(dd1[i,]$zhugao,2),"cm," , dd1[i,]$huase,"hua,", dd1[i,]$rongmaose,"mao,", "zhujing ",round(dd1[i,]$zhujingjieshu,2),"jie," , "danzhuyouxiaofenzhi ",round(dd1[i,]$fenzhishu,2),"ge,", "danzhuyouxiaojia ",round(dd1[i,]$youxiaojia,2),"ge," , "zili ",dd1[i,]$lixing,"xing,",dd1[i,]$qise,"qi,", "bailizhong ",round(dd1[i,]$bailizhong,2),"g." , "daofujibie ",round(dd1[i,]$daofuxing,2),"ji.", "jing zonghe pingding,gai pingxi jinru ",dd1[i,]$nextstage,"zu.",sep="") } return(trtr) } # # #
/R/yield.R
no_license
zhaoqingsonga/soy
R
false
false
2,094
r
# #dui zhongwen bu youhao. # zhiyao shi tongyi shiyan bianke. yieldAnalysis<-function(dd,area=667) { dd$chanliang<-chanliang(dd,area) #dd$biaozhunchanliang<-biaozhunchanliang(dd) fit<-aov(chanliang~code,data=dd) #summary(fit) library("agricolae") dt<-duncan.test(fit,"code",alpha=0.05) fcfx<-cbind(dt$means[order(dt$means$chanliang,decreasing=TRUE),],dt$groups$M) fcfx$CV<-fcfx$std/fcfx$chanliang*100 check=mean(dd$chanliang[dd$is_ck==1],na.rm=TRUE) fcfx$zc<-(fcfx$chanliang/check-1)*100 #names(fcfx) fcfx$chanliang<-round(fcfx$chanliang,2) fcfx$std<-round(fcfx$std,2) fcfx$Min<-round(fcfx$Min,2) fcfx$Max<-round(fcfx$Max,2) fcfx$CV<-round(fcfx$CV,2) fcfx$zc<-round(fcfx$zc,2) fcfx$px<-(1:nrow(fcfx)) names(fcfx)<-c("yield(kg)","std","rp","Mix(kg)","max(kg)","group(5%)","CV%","more(%)","order") return(fcfx) } # #dui zhongwen bu youhao,zhongwen ban. # pingshu<-function(dd){ dd$zengchan<-zengchan(dd) dd$biaozhunchanliang<-biaozhunchanliang(dd) dd1<-aggregate(dd,by=list(code=dd$code),mean,na.rm=TRUE) trtr<-NULL for (i in 1:nrow(dd1)){ trtr[i]<-paste( dd1[i,]$code, "(", dd1[i,]$laiyuan,")", #think of hr ",pingjun chanliang ",round(dd1[i,]$biaozhunchanliang,2),"kg." , "pingjun bi duizhao zengchan ",round(dd1[i,]$zengchan,2),"%." , "shengyuqi ",round(dd1[i,]$shengyuqi,0),"tian." , "danbaizhihanliang ",round(dd1[i,]$danbai,2),"%,", "zhifanghanliang ",round(dd1[i,]$zhifang,2),"%.", dd1[i,]$jiejiaxixing,"jiejiaxixing,", "zhugao ",round(dd1[i,]$zhugao,2),"cm," , dd1[i,]$huase,"hua,", dd1[i,]$rongmaose,"mao,", "zhujing ",round(dd1[i,]$zhujingjieshu,2),"jie," , "danzhuyouxiaofenzhi ",round(dd1[i,]$fenzhishu,2),"ge,", "danzhuyouxiaojia ",round(dd1[i,]$youxiaojia,2),"ge," , "zili ",dd1[i,]$lixing,"xing,",dd1[i,]$qise,"qi,", "bailizhong ",round(dd1[i,]$bailizhong,2),"g." , "daofujibie ",round(dd1[i,]$daofuxing,2),"ji.", "jing zonghe pingding,gai pingxi jinru ",dd1[i,]$nextstage,"zu.",sep="") } return(trtr) } # # #
#Data revenue <- c(14574.49, 7606.46, 8611.41, 9175.41, 8058.65, 8105.44, 11496.28, 9766.09, 10305.32, 14379.96, 10713.97, 15433.50) expenses <- c(12051.82, 5695.07, 12319.20, 12089.72, 8658.57, 840.20, 3285.73, 5821.12, 6976.93, 16618.61, 10054.37, 3803.96) #Solution #Calculate Profit As The Differences Between Revenue And Expenses profit <- revenue - expenses profit #Calculate Tax As 30% Of Profit And Round To 2 Decimal Points tax <- round(0.30 * profit, 2) tax #Calculate Profit Remaining After Tax Is Deducted profit.after.tax <- profit - tax profit.after.tax #Calculate The Profit Margin As Profit After Tax Over Revenue #Round To 2 Decimal Points, Then Multiply By 100 To Get % profit.margin <- round(profit.after.tax / revenue, 2) * 100 profit.margin #Calculate The Mean Profit After Tax For The 12 Months mean_pat <- mean(profit.after.tax) mean_pat #Find The Months With Above-Mean Profit After Tax good.months <- profit.after.tax > mean_pat good.months #Bad Months Are The Opposite Of Good Months ! bad.months <- profit.after.tax<good.months bad.months #The Best Month Is Where Profit After Tax Was Equal To The Maximum best.month <- profit.after.tax == max(profit.after.tax) best.month #The Worst Month Is Where Profit After Tax Was Equal To The Minimum worst.month <- profit.after.tax == min(profit.after.tax) worst.month #Convert All Calculations To Units Of One Thousand Dollars revenue.1000 <- round(revenue / 1000, 0) expenses.1000 <- round(expenses / 1000, 0) profit.1000 <- round(profit / 1000, 0) profit.after.tax.1000 <- round(profit.after.tax / 1000, 0) #Print Results revenue.1000 expenses.1000 profit.1000 profit.after.tax.1000 profit.margin good.months bad.months best.month worst.month #BONUS: #Preview Of What's Coming In The Next Section #M <- rbind( # revenue.1000, #expenses.1000, #profit.1000, #profit.after.tax.1000, #profit.margin, #good.months, #bad.months, #best.month, #worst.month #) #Print The Matrix #_
/Section3-Homework-FillInTheBlanks.R/Section3-Homework-FillInTheBlanks.R
no_license
desamsetti/R
R
false
false
1,976
r
#Data revenue <- c(14574.49, 7606.46, 8611.41, 9175.41, 8058.65, 8105.44, 11496.28, 9766.09, 10305.32, 14379.96, 10713.97, 15433.50) expenses <- c(12051.82, 5695.07, 12319.20, 12089.72, 8658.57, 840.20, 3285.73, 5821.12, 6976.93, 16618.61, 10054.37, 3803.96) #Solution #Calculate Profit As The Differences Between Revenue And Expenses profit <- revenue - expenses profit #Calculate Tax As 30% Of Profit And Round To 2 Decimal Points tax <- round(0.30 * profit, 2) tax #Calculate Profit Remaining After Tax Is Deducted profit.after.tax <- profit - tax profit.after.tax #Calculate The Profit Margin As Profit After Tax Over Revenue #Round To 2 Decimal Points, Then Multiply By 100 To Get % profit.margin <- round(profit.after.tax / revenue, 2) * 100 profit.margin #Calculate The Mean Profit After Tax For The 12 Months mean_pat <- mean(profit.after.tax) mean_pat #Find The Months With Above-Mean Profit After Tax good.months <- profit.after.tax > mean_pat good.months #Bad Months Are The Opposite Of Good Months ! bad.months <- profit.after.tax<good.months bad.months #The Best Month Is Where Profit After Tax Was Equal To The Maximum best.month <- profit.after.tax == max(profit.after.tax) best.month #The Worst Month Is Where Profit After Tax Was Equal To The Minimum worst.month <- profit.after.tax == min(profit.after.tax) worst.month #Convert All Calculations To Units Of One Thousand Dollars revenue.1000 <- round(revenue / 1000, 0) expenses.1000 <- round(expenses / 1000, 0) profit.1000 <- round(profit / 1000, 0) profit.after.tax.1000 <- round(profit.after.tax / 1000, 0) #Print Results revenue.1000 expenses.1000 profit.1000 profit.after.tax.1000 profit.margin good.months bad.months best.month worst.month #BONUS: #Preview Of What's Coming In The Next Section #M <- rbind( # revenue.1000, #expenses.1000, #profit.1000, #profit.after.tax.1000, #profit.margin, #good.months, #bad.months, #best.month, #worst.month #) #Print The Matrix #_
library(doParallel) library(doRNG) source("coupling.R") source("ising/ising_functions.R") set.seed(1) registerDoParallel(cores = detectCores()) size <- 32 # We're ultimately interested in a low temperature, for which the SSG sampler is not mixing well. beta <- 0.46 NREP <- 5e2 max_iterations <- 1e7 # Single-site Gibbs. ssg_lag <- 1e6 ssg_kernel <- get_ising_ssg_kernel(size, beta) ssg_meeting_times <- foreach(i = 1:NREP, .combine=rbind) %dorng% { res <- simulate_meeting_time(ssg_kernel$kernel, ssg_kernel$coupled_kernel, ssg_kernel$init, max_iterations = max_iterations, L = ssg_lag) res$meeting_time } write.csv(ssg_meeting_times, "ising_ssg_meeting_times.csv") # Parallel tempering. pt_lag <- 2e4 # Introduce a grid of lower values of beta. nchains <- 12 betas_grid <- seq(from = 0.3, to = beta, length.out = nchains) # Probability of performing a swap move rather than a sweep of single site Gibbs updates. proba_swapmove <- 1 / 50 pt_cmp <- function(chain_states1, chain_states2) { all(sapply(1:length(chain_states1), function(i) all(chain_states1[[i]] == chain_states2[[i]]))) } pt_kernel <- get_ising_pt_kernel(size, betas_grid, proba_swapmove) pt_meeting_times <- foreach(i = 1:NREP, .combine=rbind) %dorng% { res <- simulate_meeting_time(pt_kernel$kernel, pt_kernel$coupled_kernel, pt_kernel$init, max_iterations = max_iterations, L = pt_lag, cmp = pt_cmp) res$meeting_time } write.csv(pt_meeting_times, "ising_pt_meeting_times.csv")
/ising/ising_run.R
permissive
niloyb/LlagCouplings
R
false
false
1,461
r
library(doParallel) library(doRNG) source("coupling.R") source("ising/ising_functions.R") set.seed(1) registerDoParallel(cores = detectCores()) size <- 32 # We're ultimately interested in a low temperature, for which the SSG sampler is not mixing well. beta <- 0.46 NREP <- 5e2 max_iterations <- 1e7 # Single-site Gibbs. ssg_lag <- 1e6 ssg_kernel <- get_ising_ssg_kernel(size, beta) ssg_meeting_times <- foreach(i = 1:NREP, .combine=rbind) %dorng% { res <- simulate_meeting_time(ssg_kernel$kernel, ssg_kernel$coupled_kernel, ssg_kernel$init, max_iterations = max_iterations, L = ssg_lag) res$meeting_time } write.csv(ssg_meeting_times, "ising_ssg_meeting_times.csv") # Parallel tempering. pt_lag <- 2e4 # Introduce a grid of lower values of beta. nchains <- 12 betas_grid <- seq(from = 0.3, to = beta, length.out = nchains) # Probability of performing a swap move rather than a sweep of single site Gibbs updates. proba_swapmove <- 1 / 50 pt_cmp <- function(chain_states1, chain_states2) { all(sapply(1:length(chain_states1), function(i) all(chain_states1[[i]] == chain_states2[[i]]))) } pt_kernel <- get_ising_pt_kernel(size, betas_grid, proba_swapmove) pt_meeting_times <- foreach(i = 1:NREP, .combine=rbind) %dorng% { res <- simulate_meeting_time(pt_kernel$kernel, pt_kernel$coupled_kernel, pt_kernel$init, max_iterations = max_iterations, L = pt_lag, cmp = pt_cmp) res$meeting_time } write.csv(pt_meeting_times, "ising_pt_meeting_times.csv")
# run code before source('R/00_loadpackages.R') source('R/00_vis_custom.R') source('R/01_load_wrangle.R') source('R/01_guana_wrangle_tidy.R') # all sites first dat4 %>% filter(component_short == "TP") %>% ggplot() + geom_line(aes(x = date_sampled, y = result, color = site), size = 1) + geom_point(aes(x = date_sampled, y = result, color = site), size = 3) + scale_colour_manual(values = sitecolours) + theme_classic() + theme(legend.title = element_blank(), # everything in theme is strictly aesthetics legend.position = "bottom", legend.text = element_text(size=12), axis.title.x = element_blank(), axis.title.y = element_text(size=13), axis.ticks = element_line(color='black'), plot.caption = element_text(size=6, face='italic'), axis.text.x = element_text(angle = 90, vjust=0.3, size=12, color='black'), axis.text.y = element_text(size=12, color='black'), axis.ticks.x = element_line(color='black'), plot.title = element_text(size = 16, face='bold'), panel.grid.minor = element_blank(), panel.grid.major = element_line(color='gray95'))+ scale_y_continuous(expand = c(0,0))+ scale_x_datetime(date_breaks = '1 month', date_minor_breaks = '2 weeks', date_labels='%b-%y')+ labs(x = '', y = phos_y_title, title = "All Guana Water Quality Sites") # ---------------------------------------------------------------------- # all lake sites with open water and water control structure sites shown # ---------------------------------------------------------------------- dat4 %>% filter(WBID == "Lake" & component_short == "TP") %>% ggplot() + geom_line(aes(x = date_sampled, y = result, color = site, linetype = sitetype), size = 1) + geom_point(aes(x = date_sampled, y = result, color = site, shape = sitetype), size = 3) + scale_colour_manual(values = sitecolours) + theme_classic() + theme(legend.title = element_blank(), # everything in theme is strictly aesthetics legend.position = "bottom", legend.text = element_text(size=12), axis.title.x = element_blank(), axis.title.y = element_text(size=13), 
axis.ticks = element_line(color='black'), plot.caption = element_text(size=6, face='italic'), axis.text.x = element_text(angle = 90, vjust=0.3, size=12, color='black'), axis.text.y = element_text(size=12, color='black'), axis.ticks.x = element_line(color='black'), plot.title = element_text(size = 16, face='bold'), panel.grid.minor = element_blank(), panel.grid.major = element_line(color='gray95')) + scale_y_continuous(expand = c(0,0)) + scale_x_datetime(date_breaks = '1 month', date_minor_breaks = '2 weeks', date_labels = '%b-%y')+ labs(x = '', y = phos_y_title, title = "Lake Sites", caption = "Sites near water control structures are sampled for hydrologic connectivity and not used for waterbody assessments") # ---------------------------------------------------------------------- # all river sites with regulation sites and excluded sites shown # ---------------------------------------------------------------------- dat4 %>% filter(WBID == "River" & component_short == "TP") %>% ggplot() + geom_line(aes(x = date_sampled, y = result, color = site, linetype = sitetype), size = 1) + geom_hline(yintercept = 0.105, linetype = 'longdash', color = 'gray18', size = 1.5) + geom_point(aes(x = date_sampled, y = result, color = site, shape = sitetype), size = 3) + scale_colour_manual(values = sitecolours) + theme_classic() + theme(legend.title = element_blank(), # everything in theme is strictly aesthetics legend.position = "bottom", legend.text = element_text(size=12), axis.title.x = element_blank(), axis.title.y = element_text(size=13), axis.ticks = element_line(color='black'), plot.caption = element_text(size=6, face='italic'), axis.text.x = element_text(angle = 90, vjust=0.3, size=12, color='black'), axis.text.y = element_text(size=12, color='black'), axis.ticks.x = element_line(color='black'), plot.title = element_text(size = 16, face='bold'), panel.grid.minor = element_blank(), panel.grid.major = element_line(color='gray95')) + scale_y_continuous(expand = c(0,0)) + 
scale_x_datetime(date_breaks = '1 month', date_minor_breaks = '2 weeks', date_labels = '%b-%y') + labs(x = '', y = phos_y_title, title = "River Sites") # ---------------------------------------------------------------------- # looking into values more specifically, color differentiates threshold # ---------------------------------------------------------------------- dat4 %>% filter(WBID == "River" & component_short == "TP") %>% ggplot() + geom_hline(yintercept = 0.105, linetype = 'longdash', color = 'gray18', size = 1.5) + geom_point(aes(x = date_sampled, y = result, color = result > 0.105, shape = sitetype), size = 3) + theme_classic() + theme(# everything in theme is strictly aesthetics legend.position = "bottom", legend.text = element_text(size=12), axis.title.x = element_blank(), axis.title.y = element_text(size=13), axis.ticks = element_line(color='black'), plot.caption = element_text(size=6, face='italic'), axis.text.x = element_text(angle = 90, vjust=0.3, size=12, color='black'), axis.text.y = element_text(size=12, color='black'), axis.ticks.x = element_line(color='black'), plot.title = element_text(size = 16, face='bold'), plot.subtitle = element_text(size = 11, face = 'italic'), panel.grid.minor = element_blank(), panel.grid.major = element_line(color='gray95'))+ scale_x_datetime(date_breaks = '1 month', date_minor_breaks = '2 weeks', date_labels = '%b-%y')+ scale_colour_manual(values = c("darkturquoise", "orange")) + labs(x = '', y = phos_y_title, title = "River Sites", subtitle = "Threshold exceedances indicated by color change", caption = "Sites near water control structures are sampled for hydrologic connectivity and not used for waterbody assessments") # ---------------------------------------------------------------------- # just open water sites for regulation # ---------------------------------------------------------------------- # all open water sites dat4 %>% filter(component_short == "TP" & sitetype == "OpenWater") %>% ggplot() + 
geom_line(aes(x = date_sampled, y = result, color = site), size = 1) + geom_point(aes(x = date_sampled, y = result, color = site), size = 3) + scale_colour_manual(values = sitecolours) + theme_classic() + theme(legend.title = element_blank(), # everything in theme is strictly aesthetics legend.position = "bottom", legend.text = element_text(size=12), axis.title.x = element_blank(), axis.title.y = element_text(size=13), axis.ticks = element_line(color='black'), plot.caption = element_text(size=6, face='italic'), axis.text.x = element_text(angle = 90, vjust=0.3, size=12, color='black'), axis.text.y = element_text(size=12, color='black'), axis.ticks.x = element_line(color='black'), plot.title = element_text(size = 16, face='bold'), panel.grid.minor = element_blank(), panel.grid.major = element_line(color='gray95')) + scale_y_continuous(expand = c(0,0)) + scale_x_datetime(date_breaks = '1 month', date_minor_breaks = '2 weeks', date_labels = '%b-%y') + labs(x = '', y = phos_y_title, title = "Only the Open Water Guana Water Quality Sites") # lake dat4 %>% filter(WBID == "Lake" & component_short == "TP" & sitetype == "OpenWater") %>% ggplot() + geom_line(aes(x = date_sampled, y = result, color = site), size = 1) + geom_point(aes(x = date_sampled, y = result, color = site), size = 3) + scale_colour_manual(values = sitecolours) + theme_classic() + theme(legend.title = element_blank(), # everything in theme is strictly aesthetics legend.position = "bottom", legend.text = element_text(size=12), axis.title.x = element_blank(), axis.title.y = element_text(size=13), axis.ticks = element_line(color='black'), plot.caption = element_text(size=6, face='italic'), axis.text.x = element_text(angle = 90, vjust=0.3, size=12, color='black'), axis.text.y = element_text(size=12, color='black'), axis.ticks.x = element_line(color='black'), plot.title = element_text(size = 16, face='bold'), panel.grid.minor = element_blank(), panel.grid.major = element_line(color='gray95'))+ 
scale_y_continuous(expand = c(0,0))+ scale_x_datetime(date_breaks = '1 month', date_minor_breaks = '2 weeks', date_labels = '%b-%y')+ labs(x = '', y = phos_y_title, title = "Open Water Lake Sites") # river dat4 %>% filter(WBID == "River" & component_short == "TP" & sitetype == "OpenWater") %>% ggplot() + geom_line(aes(x = date_sampled, y = result, color = site), size = 1) + geom_hline(yintercept = 0.105, linetype = 'longdash', color = 'gray18', size = 1.5) + geom_point(aes(x = date_sampled, y = result, color = site), size = 3) + scale_colour_manual(values = sitecolours) + theme_classic() + theme(legend.title = element_blank(), # everything in theme is strictly aesthetics legend.position = "bottom", legend.text = element_text(size=12), axis.title.x = element_blank(), axis.title.y = element_text(size=13), axis.ticks = element_line(color='black'), plot.caption = element_text(size=6, face='italic'), axis.text.x = element_text(angle = 90, vjust=0.3, size=12, color='black'), axis.text.y = element_text(size=12, color='black'), axis.ticks.x = element_line(color='black'), plot.title = element_text(size = 16, face='bold'), panel.grid.minor = element_blank(), panel.grid.major = element_line(color='gray95')) + scale_y_continuous(expand = c(0,0)) + scale_x_datetime(date_breaks = '1 month', date_minor_breaks = '2 weeks', date_labels = '%b-%y') + labs(x = '', y = phos_y_title, title = "Open Water River Sites") dat4 %>% filter(WBID == "River" & component_short == "TP" & sitetype == "OpenWater") %>% ggplot() + geom_hline(yintercept = 0.105, linetype = 'longdash', color = 'gray18', size = 1.5) + geom_point(aes(x = date_sampled, y = result, color = result > 0.105, shape = site), size = 3) + theme_classic() + theme(# everything in theme is strictly aesthetics legend.position = "bottom", legend.text = element_text(size=12), axis.title.x = element_blank(), axis.title.y = element_text(size=13), axis.ticks = element_line(color='black'), plot.caption = element_text(size=6, face='italic'), 
axis.text.x = element_text(angle = 90, vjust=0.3, size=12, color='black'), axis.text.y = element_text(size=12, color='black'), axis.ticks.x = element_line(color='black'), plot.title = element_text(size = 16, face='bold'), panel.grid.minor = element_blank(), panel.grid.major = element_line(color='gray95')) + scale_y_continuous(expand = c(0,0)) + scale_x_datetime(date_breaks = '1 month', date_minor_breaks = '2 weeks', date_labels = '%b-%y')+ scale_colour_manual(values = c("darkturquoise", "orange"))+ labs(x = '', y = phos_y_title, title = "Open Water River Sites")
/R/Working/02_totalphosphorus.R
no_license
skdunnigan/guana
R
false
false
11,455
r
# run code before source('R/00_loadpackages.R') source('R/00_vis_custom.R') source('R/01_load_wrangle.R') source('R/01_guana_wrangle_tidy.R') # all sites first dat4 %>% filter(component_short == "TP") %>% ggplot() + geom_line(aes(x = date_sampled, y = result, color = site), size = 1) + geom_point(aes(x = date_sampled, y = result, color = site), size = 3) + scale_colour_manual(values = sitecolours) + theme_classic() + theme(legend.title = element_blank(), # everything in theme is strictly aesthetics legend.position = "bottom", legend.text = element_text(size=12), axis.title.x = element_blank(), axis.title.y = element_text(size=13), axis.ticks = element_line(color='black'), plot.caption = element_text(size=6, face='italic'), axis.text.x = element_text(angle = 90, vjust=0.3, size=12, color='black'), axis.text.y = element_text(size=12, color='black'), axis.ticks.x = element_line(color='black'), plot.title = element_text(size = 16, face='bold'), panel.grid.minor = element_blank(), panel.grid.major = element_line(color='gray95'))+ scale_y_continuous(expand = c(0,0))+ scale_x_datetime(date_breaks = '1 month', date_minor_breaks = '2 weeks', date_labels='%b-%y')+ labs(x = '', y = phos_y_title, title = "All Guana Water Quality Sites") # ---------------------------------------------------------------------- # all lake sites with open water and water control structure sites shown # ---------------------------------------------------------------------- dat4 %>% filter(WBID == "Lake" & component_short == "TP") %>% ggplot() + geom_line(aes(x = date_sampled, y = result, color = site, linetype = sitetype), size = 1) + geom_point(aes(x = date_sampled, y = result, color = site, shape = sitetype), size = 3) + scale_colour_manual(values = sitecolours) + theme_classic() + theme(legend.title = element_blank(), # everything in theme is strictly aesthetics legend.position = "bottom", legend.text = element_text(size=12), axis.title.x = element_blank(), axis.title.y = element_text(size=13), 
axis.ticks = element_line(color='black'), plot.caption = element_text(size=6, face='italic'), axis.text.x = element_text(angle = 90, vjust=0.3, size=12, color='black'), axis.text.y = element_text(size=12, color='black'), axis.ticks.x = element_line(color='black'), plot.title = element_text(size = 16, face='bold'), panel.grid.minor = element_blank(), panel.grid.major = element_line(color='gray95')) + scale_y_continuous(expand = c(0,0)) + scale_x_datetime(date_breaks = '1 month', date_minor_breaks = '2 weeks', date_labels = '%b-%y')+ labs(x = '', y = phos_y_title, title = "Lake Sites", caption = "Sites near water control structures are sampled for hydrologic connectivity and not used for waterbody assessments") # ---------------------------------------------------------------------- # all river sites with regulation sites and excluded sites shown # ---------------------------------------------------------------------- dat4 %>% filter(WBID == "River" & component_short == "TP") %>% ggplot() + geom_line(aes(x = date_sampled, y = result, color = site, linetype = sitetype), size = 1) + geom_hline(yintercept = 0.105, linetype = 'longdash', color = 'gray18', size = 1.5) + geom_point(aes(x = date_sampled, y = result, color = site, shape = sitetype), size = 3) + scale_colour_manual(values = sitecolours) + theme_classic() + theme(legend.title = element_blank(), # everything in theme is strictly aesthetics legend.position = "bottom", legend.text = element_text(size=12), axis.title.x = element_blank(), axis.title.y = element_text(size=13), axis.ticks = element_line(color='black'), plot.caption = element_text(size=6, face='italic'), axis.text.x = element_text(angle = 90, vjust=0.3, size=12, color='black'), axis.text.y = element_text(size=12, color='black'), axis.ticks.x = element_line(color='black'), plot.title = element_text(size = 16, face='bold'), panel.grid.minor = element_blank(), panel.grid.major = element_line(color='gray95')) + scale_y_continuous(expand = c(0,0)) + 
scale_x_datetime(date_breaks = '1 month', date_minor_breaks = '2 weeks', date_labels = '%b-%y') + labs(x = '', y = phos_y_title, title = "River Sites") # ---------------------------------------------------------------------- # looking into values more specifically, color differentiates threshold # ---------------------------------------------------------------------- dat4 %>% filter(WBID == "River" & component_short == "TP") %>% ggplot() + geom_hline(yintercept = 0.105, linetype = 'longdash', color = 'gray18', size = 1.5) + geom_point(aes(x = date_sampled, y = result, color = result > 0.105, shape = sitetype), size = 3) + theme_classic() + theme(# everything in theme is strictly aesthetics legend.position = "bottom", legend.text = element_text(size=12), axis.title.x = element_blank(), axis.title.y = element_text(size=13), axis.ticks = element_line(color='black'), plot.caption = element_text(size=6, face='italic'), axis.text.x = element_text(angle = 90, vjust=0.3, size=12, color='black'), axis.text.y = element_text(size=12, color='black'), axis.ticks.x = element_line(color='black'), plot.title = element_text(size = 16, face='bold'), plot.subtitle = element_text(size = 11, face = 'italic'), panel.grid.minor = element_blank(), panel.grid.major = element_line(color='gray95'))+ scale_x_datetime(date_breaks = '1 month', date_minor_breaks = '2 weeks', date_labels = '%b-%y')+ scale_colour_manual(values = c("darkturquoise", "orange")) + labs(x = '', y = phos_y_title, title = "River Sites", subtitle = "Threshold exceedances indicated by color change", caption = "Sites near water control structures are sampled for hydrologic connectivity and not used for waterbody assessments") # ---------------------------------------------------------------------- # just open water sites for regulation # ---------------------------------------------------------------------- # all open water sites dat4 %>% filter(component_short == "TP" & sitetype == "OpenWater") %>% ggplot() + 
geom_line(aes(x = date_sampled, y = result, color = site), size = 1) + geom_point(aes(x = date_sampled, y = result, color = site), size = 3) + scale_colour_manual(values = sitecolours) + theme_classic() + theme(legend.title = element_blank(), # everything in theme is strictly aesthetics legend.position = "bottom", legend.text = element_text(size=12), axis.title.x = element_blank(), axis.title.y = element_text(size=13), axis.ticks = element_line(color='black'), plot.caption = element_text(size=6, face='italic'), axis.text.x = element_text(angle = 90, vjust=0.3, size=12, color='black'), axis.text.y = element_text(size=12, color='black'), axis.ticks.x = element_line(color='black'), plot.title = element_text(size = 16, face='bold'), panel.grid.minor = element_blank(), panel.grid.major = element_line(color='gray95')) + scale_y_continuous(expand = c(0,0)) + scale_x_datetime(date_breaks = '1 month', date_minor_breaks = '2 weeks', date_labels = '%b-%y') + labs(x = '', y = phos_y_title, title = "Only the Open Water Guana Water Quality Sites") # lake dat4 %>% filter(WBID == "Lake" & component_short == "TP" & sitetype == "OpenWater") %>% ggplot() + geom_line(aes(x = date_sampled, y = result, color = site), size = 1) + geom_point(aes(x = date_sampled, y = result, color = site), size = 3) + scale_colour_manual(values = sitecolours) + theme_classic() + theme(legend.title = element_blank(), # everything in theme is strictly aesthetics legend.position = "bottom", legend.text = element_text(size=12), axis.title.x = element_blank(), axis.title.y = element_text(size=13), axis.ticks = element_line(color='black'), plot.caption = element_text(size=6, face='italic'), axis.text.x = element_text(angle = 90, vjust=0.3, size=12, color='black'), axis.text.y = element_text(size=12, color='black'), axis.ticks.x = element_line(color='black'), plot.title = element_text(size = 16, face='bold'), panel.grid.minor = element_blank(), panel.grid.major = element_line(color='gray95'))+ 
scale_y_continuous(expand = c(0,0))+ scale_x_datetime(date_breaks = '1 month', date_minor_breaks = '2 weeks', date_labels = '%b-%y')+ labs(x = '', y = phos_y_title, title = "Open Water Lake Sites") # river dat4 %>% filter(WBID == "River" & component_short == "TP" & sitetype == "OpenWater") %>% ggplot() + geom_line(aes(x = date_sampled, y = result, color = site), size = 1) + geom_hline(yintercept = 0.105, linetype = 'longdash', color = 'gray18', size = 1.5) + geom_point(aes(x = date_sampled, y = result, color = site), size = 3) + scale_colour_manual(values = sitecolours) + theme_classic() + theme(legend.title = element_blank(), # everything in theme is strictly aesthetics legend.position = "bottom", legend.text = element_text(size=12), axis.title.x = element_blank(), axis.title.y = element_text(size=13), axis.ticks = element_line(color='black'), plot.caption = element_text(size=6, face='italic'), axis.text.x = element_text(angle = 90, vjust=0.3, size=12, color='black'), axis.text.y = element_text(size=12, color='black'), axis.ticks.x = element_line(color='black'), plot.title = element_text(size = 16, face='bold'), panel.grid.minor = element_blank(), panel.grid.major = element_line(color='gray95')) + scale_y_continuous(expand = c(0,0)) + scale_x_datetime(date_breaks = '1 month', date_minor_breaks = '2 weeks', date_labels = '%b-%y') + labs(x = '', y = phos_y_title, title = "Open Water River Sites") dat4 %>% filter(WBID == "River" & component_short == "TP" & sitetype == "OpenWater") %>% ggplot() + geom_hline(yintercept = 0.105, linetype = 'longdash', color = 'gray18', size = 1.5) + geom_point(aes(x = date_sampled, y = result, color = result > 0.105, shape = site), size = 3) + theme_classic() + theme(# everything in theme is strictly aesthetics legend.position = "bottom", legend.text = element_text(size=12), axis.title.x = element_blank(), axis.title.y = element_text(size=13), axis.ticks = element_line(color='black'), plot.caption = element_text(size=6, face='italic'), 
axis.text.x = element_text(angle = 90, vjust=0.3, size=12, color='black'), axis.text.y = element_text(size=12, color='black'), axis.ticks.x = element_line(color='black'), plot.title = element_text(size = 16, face='bold'), panel.grid.minor = element_blank(), panel.grid.major = element_line(color='gray95')) + scale_y_continuous(expand = c(0,0)) + scale_x_datetime(date_breaks = '1 month', date_minor_breaks = '2 weeks', date_labels = '%b-%y')+ scale_colour_manual(values = c("darkturquoise", "orange"))+ labs(x = '', y = phos_y_title, title = "Open Water River Sites")
## global.R ## # 加载R包----- enableBookmarking(store = "url") library(DTedit) library(shiny); library(shinydashboard); library(tsda); library(tsdo); library(tsui); library(shinyalert); DTeditCn::setDTtoCn() # 设置引入页----- source('00_data.R',encoding = 'utf-8'); source('00_conn.R',encoding = 'utf-8'); source('topbarMenu.R',encoding = 'utf-8'); source('sideBarSetting.R',encoding = 'utf-8'); source('01_row_body.R',encoding = 'utf-8'); source('02_column_body.R',encoding = 'utf-8'); source('03_book_body.R',encoding = 'utf-8'); source('04_series_body.R',encoding = 'utf-8'); source('05_majority_body.R',encoding = 'utf-8'); source('06_tutor_body.R',encoding = 'utf-8'); source('99_sysSetting_body.R',encoding = 'utf-8'); source('workAreaSetting.R',encoding = 'utf-8')
/global.R
no_license
takewiki/lcsd
R
false
false
790
r
## global.R ## # 加载R包----- enableBookmarking(store = "url") library(DTedit) library(shiny); library(shinydashboard); library(tsda); library(tsdo); library(tsui); library(shinyalert); DTeditCn::setDTtoCn() # 设置引入页----- source('00_data.R',encoding = 'utf-8'); source('00_conn.R',encoding = 'utf-8'); source('topbarMenu.R',encoding = 'utf-8'); source('sideBarSetting.R',encoding = 'utf-8'); source('01_row_body.R',encoding = 'utf-8'); source('02_column_body.R',encoding = 'utf-8'); source('03_book_body.R',encoding = 'utf-8'); source('04_series_body.R',encoding = 'utf-8'); source('05_majority_body.R',encoding = 'utf-8'); source('06_tutor_body.R',encoding = 'utf-8'); source('99_sysSetting_body.R',encoding = 'utf-8'); source('workAreaSetting.R',encoding = 'utf-8')
context("check_TF") test_that("check_TF", { expect_null(check_TF(TRUE)) expect_null(check_TF(FALSE)) mustBe <- NA expect_error(check_TF(mustBe), '`mustBe = NA` but must be TRUE or FALSE. Change `mustBe` to be TRUE or FALSE.') mustBe <- 1 expect_error(check_TF(mustBe), '`mustBe` was type double but must be logical. Change `mustBe` to be TRUE or FALSE.') mustBe <- c(TRUE, FALSE) expect_error(check_TF(mustBe), '`mustBe` had length 2 but must be length-one. Change `mustBe` to be TRUE or FALSE.') }) test_that("check_num1", { expect_null(check_num1(1L)) expect_null(check_num1(1)) mm <- 1:5 expect_error(check_num1(mm), "`mm` had length 5 but must be length-one.", fixed = TRUE) mm <- "mm" expect_error(check_num1(mm), "`mm` was type character", fixed = TRUE) mm <- NA_real_ expect_error(check_num1(mm), "`mm = NA` but must be a non-missing numeric.", fixed = TRUE) })
/tests/testthat/test_check_TF.R
no_license
cran/grattan
R
false
false
1,079
r
context("check_TF") test_that("check_TF", { expect_null(check_TF(TRUE)) expect_null(check_TF(FALSE)) mustBe <- NA expect_error(check_TF(mustBe), '`mustBe = NA` but must be TRUE or FALSE. Change `mustBe` to be TRUE or FALSE.') mustBe <- 1 expect_error(check_TF(mustBe), '`mustBe` was type double but must be logical. Change `mustBe` to be TRUE or FALSE.') mustBe <- c(TRUE, FALSE) expect_error(check_TF(mustBe), '`mustBe` had length 2 but must be length-one. Change `mustBe` to be TRUE or FALSE.') }) test_that("check_num1", { expect_null(check_num1(1L)) expect_null(check_num1(1)) mm <- 1:5 expect_error(check_num1(mm), "`mm` had length 5 but must be length-one.", fixed = TRUE) mm <- "mm" expect_error(check_num1(mm), "`mm` was type character", fixed = TRUE) mm <- NA_real_ expect_error(check_num1(mm), "`mm = NA` but must be a non-missing numeric.", fixed = TRUE) })
tar_test("tar_pid()", { tar_script(tar_target(x, 1)) tar_make(callr_function = NULL) expect_equal(tar_pid(), Sys.getpid()) }) tar_test("custom script and store args", { skip_cran() expect_equal(tar_config_get("script"), path_script_default()) expect_equal(tar_config_get("store"), path_store_default()) tar_script({ list( tar_target(w, letters) ) }, script = "example/script.R") tar_make( callr_function = NULL, script = "example/script.R", store = "example/store" ) expect_true(is.numeric(tar_pid(store = "example/store"))) expect_false(file.exists("_targets.yaml")) expect_equal(tar_config_get("script"), path_script_default()) expect_equal(tar_config_get("store"), path_store_default()) expect_false(file.exists(path_script_default())) expect_false(file.exists(path_store_default())) expect_true(file.exists("example/script.R")) expect_true(file.exists("example/store")) expect_true(file.exists("example/store/meta/meta")) expect_true(file.exists("example/store/objects/w")) tar_config_set(script = "x") expect_equal(tar_config_get("script"), "x") expect_true(file.exists("_targets.yaml")) })
/tests/testthat/test-tar_pid.R
permissive
ropensci/targets
R
false
false
1,169
r
tar_test("tar_pid()", { tar_script(tar_target(x, 1)) tar_make(callr_function = NULL) expect_equal(tar_pid(), Sys.getpid()) }) tar_test("custom script and store args", { skip_cran() expect_equal(tar_config_get("script"), path_script_default()) expect_equal(tar_config_get("store"), path_store_default()) tar_script({ list( tar_target(w, letters) ) }, script = "example/script.R") tar_make( callr_function = NULL, script = "example/script.R", store = "example/store" ) expect_true(is.numeric(tar_pid(store = "example/store"))) expect_false(file.exists("_targets.yaml")) expect_equal(tar_config_get("script"), path_script_default()) expect_equal(tar_config_get("store"), path_store_default()) expect_false(file.exists(path_script_default())) expect_false(file.exists(path_store_default())) expect_true(file.exists("example/script.R")) expect_true(file.exists("example/store")) expect_true(file.exists("example/store/meta/meta")) expect_true(file.exists("example/store/objects/w")) tar_config_set(script = "x") expect_equal(tar_config_get("script"), "x") expect_true(file.exists("_targets.yaml")) })
args = (commandArgs(TRUE)); chr = as.numeric(args[1]); mafrv = as.numeric(args[2]); genotype.file = args[3];# converted vcf file; output from vcf_format_change_v2.pl phenotype.file = args[4]; gene.file = args[5]; output.file = args[6]; nc = args[7]; cat("Input parameters:\n","chr = ",chr,"\n","mafrv = ",mafrv,"\n","genotype file = ",genotype.file,"\n","phenotype file = ",phenotype.file,"\n","gene file = ",gene.file,"\n","output file name = ",output.file,"\n",sep=""); source("ARIELqQT.R") # ARIEL for SNP quality incorporation, QT data #fregions <- "gene_info" # 4 columns: gene.name, chr, start, end regions <- read.table(gene.file);#,quote="",header=FALSE,sep="\t"); chr.regions <- regions[regions[,2]==chr,]; starts <- chr.regions[,3]; ends <- chr.regions[,4]; nregions <- length(ends); rm(regions); gc(); #fpheno <- paste("pheno.txt",sep=""); pheno <- read.table(phenotype.file);#,quote="",header=FALSE,sep="\t"); pheno <- unlist(pheno[,6]) # column 6 of PED file is phenotype } idlength <- length(pheno)*3; #genofile <- paste("vcf_converted.txt",sep=""); genodata <- read.table(genotype.file,na.strings=".",colClasses=c('character','numeric',rep('character',3),rep('numeric',idlength)));#,header=FALSE,quote="",sep="\t"); genodata <- genodata[,-3]; # rm rsIDs column b/c not needed for analysis chrgenodata <- genodata[genodata[,1]==chr,]; rm(genodata); gc(); output <-file(output.file,"w"); for (i in 1:nregions) { data <- ariel.data.fn(pheno,chrgenodata,chr=chr,start=starts[i],end=ends[i],nc=nc); nsnps <- length(data$geno[,1]); gene <- c(as.character(chr.regions[i,1]),nsnps,chr,starts[i],ends[i]); print(c(i,gene,dim(data$geno)[1])); if (dim(data$geno)[1]<=1) { print("Geno is null!"); } else { probs <- ARIELq.fn(y=data$pheno,g=data$geno,qgeno=data$qgeno,mafrv=mafrv); print(probs); cat(gene,probs,sep="\t",file=output); cat("\n",file=output); } } close(output); q(save="no");
/R/ARIELqQT_script_general.R
no_license
jennasimit/ARIEL
R
false
false
2,100
r
args = (commandArgs(TRUE)); chr = as.numeric(args[1]); mafrv = as.numeric(args[2]); genotype.file = args[3];# converted vcf file; output from vcf_format_change_v2.pl phenotype.file = args[4]; gene.file = args[5]; output.file = args[6]; nc = args[7]; cat("Input parameters:\n","chr = ",chr,"\n","mafrv = ",mafrv,"\n","genotype file = ",genotype.file,"\n","phenotype file = ",phenotype.file,"\n","gene file = ",gene.file,"\n","output file name = ",output.file,"\n",sep=""); source("ARIELqQT.R") # ARIEL for SNP quality incorporation, QT data #fregions <- "gene_info" # 4 columns: gene.name, chr, start, end regions <- read.table(gene.file);#,quote="",header=FALSE,sep="\t"); chr.regions <- regions[regions[,2]==chr,]; starts <- chr.regions[,3]; ends <- chr.regions[,4]; nregions <- length(ends); rm(regions); gc(); #fpheno <- paste("pheno.txt",sep=""); pheno <- read.table(phenotype.file);#,quote="",header=FALSE,sep="\t"); pheno <- unlist(pheno[,6]) # column 6 of PED file is phenotype } idlength <- length(pheno)*3; #genofile <- paste("vcf_converted.txt",sep=""); genodata <- read.table(genotype.file,na.strings=".",colClasses=c('character','numeric',rep('character',3),rep('numeric',idlength)));#,header=FALSE,quote="",sep="\t"); genodata <- genodata[,-3]; # rm rsIDs column b/c not needed for analysis chrgenodata <- genodata[genodata[,1]==chr,]; rm(genodata); gc(); output <-file(output.file,"w"); for (i in 1:nregions) { data <- ariel.data.fn(pheno,chrgenodata,chr=chr,start=starts[i],end=ends[i],nc=nc); nsnps <- length(data$geno[,1]); gene <- c(as.character(chr.regions[i,1]),nsnps,chr,starts[i],ends[i]); print(c(i,gene,dim(data$geno)[1])); if (dim(data$geno)[1]<=1) { print("Geno is null!"); } else { probs <- ARIELq.fn(y=data$pheno,g=data$geno,qgeno=data$qgeno,mafrv=mafrv); print(probs); cat(gene,probs,sep="\t",file=output); cat("\n",file=output); } } close(output); q(save="no");
library("poibin") args<-commandArgs(TRUE) probFile<-args[1] #"~/Documents/Lab/SnyderLab/LocalMutationRate/WindowProbs/RegionProbs_chr22_1.csv" countsFile<-args[2] #"/Users/cmelton/Documents/Aptana Studio 3 Workspace/GenomeScanner/output/Mar5/OneBP/RawOutput/WholeGenomeWindows_1a.1b.1d.1f.2a.2b.2c.3a.3b.4.5_chr22_1.txt" outputFile<-args[3] size<-as.numeric(args[4]) print(args[4]) print(size) # read in counts and probabilities for a chromosome probs<-read.csv(probFile, header=T, sep=",", stringsAsFactors=F) probs$regionID<-sapply(probs$regionID, function(prob) paste(strsplit(x=prob, split="_", fixed=T)[[1]][1],as.character(as.numeric(strsplit(x=prob, split="_", fixed=T)[[1]][2])+size-1), sep="_")) #get rid of redundant cols realNames<-sapply(colnames(probs), function(prob) strsplit(x=prob, split=".[x,y]", fixed=F)[[1]]) print(realNames) probs<-probs[,!duplicated(realNames)] colnames(probs)<-sapply(colnames(probs), function(prob) strsplit(x=prob, split=".[x,y]", fixed=F)[[1]]) pids<-colnames(probs)[2:length(probs)] pids[sapply(pids, nchar)==37]<-substr(pids[sapply(pids, nchar)==37], 2, 37) colnames(probs)[2:length(probs)]<-pids counts<-read.delim(countsFile, header=T, stringsAsFactors=F) counts$regionID<-paste(counts$chrom, counts$pos, sep="_") m<-merge(probs, counts[,c("regionID", "count", "patient_ids")], by="regionID", all.x=T, all.y=F) m<-m[!is.na(m$count),] # function to compute poisson binomial prob k>=K for a row of merged data getProb<-function(data, i, pids, countcol="count") { pids<-gsub(pattern = "-", replacement = ".", x = pids) pids<-pids[pids%in%colnames(data)] print(length(pids)) probs<-as.numeric(data[i,pids]) cts<-as.numeric(data[i,countcol]) print(cts) return(1-ppoibin(cts-1, probs)) } probs<-sapply(1:length(m$regionID), function(i) getProb(m, i, pids)) # generate results data frame result<-data.frame(regionID=m$regionID, prob=probs, number_of_tumors=m$count, patients=m$patient_ids) # write output to file write.csv(result, file=outputFile, 
row.names=F, quote=F)
/PoissonBinomial/PoiBinProbsNew.R
no_license
corner0426/RecurrentMutationStats
R
false
false
2,033
r
library("poibin") args<-commandArgs(TRUE) probFile<-args[1] #"~/Documents/Lab/SnyderLab/LocalMutationRate/WindowProbs/RegionProbs_chr22_1.csv" countsFile<-args[2] #"/Users/cmelton/Documents/Aptana Studio 3 Workspace/GenomeScanner/output/Mar5/OneBP/RawOutput/WholeGenomeWindows_1a.1b.1d.1f.2a.2b.2c.3a.3b.4.5_chr22_1.txt" outputFile<-args[3] size<-as.numeric(args[4]) print(args[4]) print(size) # read in counts and probabilities for a chromosome probs<-read.csv(probFile, header=T, sep=",", stringsAsFactors=F) probs$regionID<-sapply(probs$regionID, function(prob) paste(strsplit(x=prob, split="_", fixed=T)[[1]][1],as.character(as.numeric(strsplit(x=prob, split="_", fixed=T)[[1]][2])+size-1), sep="_")) #get rid of redundant cols realNames<-sapply(colnames(probs), function(prob) strsplit(x=prob, split=".[x,y]", fixed=F)[[1]]) print(realNames) probs<-probs[,!duplicated(realNames)] colnames(probs)<-sapply(colnames(probs), function(prob) strsplit(x=prob, split=".[x,y]", fixed=F)[[1]]) pids<-colnames(probs)[2:length(probs)] pids[sapply(pids, nchar)==37]<-substr(pids[sapply(pids, nchar)==37], 2, 37) colnames(probs)[2:length(probs)]<-pids counts<-read.delim(countsFile, header=T, stringsAsFactors=F) counts$regionID<-paste(counts$chrom, counts$pos, sep="_") m<-merge(probs, counts[,c("regionID", "count", "patient_ids")], by="regionID", all.x=T, all.y=F) m<-m[!is.na(m$count),] # function to compute poisson binomial prob k>=K for a row of merged data getProb<-function(data, i, pids, countcol="count") { pids<-gsub(pattern = "-", replacement = ".", x = pids) pids<-pids[pids%in%colnames(data)] print(length(pids)) probs<-as.numeric(data[i,pids]) cts<-as.numeric(data[i,countcol]) print(cts) return(1-ppoibin(cts-1, probs)) } probs<-sapply(1:length(m$regionID), function(i) getProb(m, i, pids)) # generate results data frame result<-data.frame(regionID=m$regionID, prob=probs, number_of_tumors=m$count, patients=m$patient_ids) # write output to file write.csv(result, file=outputFile, 
row.names=F, quote=F)
library(pbdMPI, quiet=TRUE) init() ## ## comm.rank <- function(comm=0) 0 ## rank 0 writes a bunch of integers into a binary file if(comm.rank() == 0) { ## write a vector of doubles to a binary file length <- 32 x <- seq(0.5, length, 1) writeBin(x, "binary.vector.file") } if(comm.rank() == 1) { ncol <- 8 nrow <- 4 x <- matrix(seq(0.5, nrow*ncol, 1), nrow=nrow) writeBin(as.vector(x), "binary.matrix.file") } finalize()
/pbdr/common/scripts/input/1_writebinary.r
no_license
RBigData/pbd-tutorial
R
false
false
455
r
library(pbdMPI, quiet=TRUE) init() ## ## comm.rank <- function(comm=0) 0 ## rank 0 writes a bunch of integers into a binary file if(comm.rank() == 0) { ## write a vector of doubles to a binary file length <- 32 x <- seq(0.5, length, 1) writeBin(x, "binary.vector.file") } if(comm.rank() == 1) { ncol <- 8 nrow <- 4 x <- matrix(seq(0.5, nrow*ncol, 1), nrow=nrow) writeBin(as.vector(x), "binary.matrix.file") } finalize()
library(testthat) library(tximport) library(tximportData) library(readr) test_check("tximport")
/2016-SIB/data/rnaseq/tximport-master/tests/testthat.R
no_license
wurmlab/genomicscourse
R
false
false
96
r
library(testthat) library(tximport) library(tximportData) library(readr) test_check("tximport")
babyData = read.csv('week_09/data/birthweight_reduced.csv') head(babyData, n=3) dim(babyData) babyData = na.omit(babyData); dim(babyData) class(babyData$smoker) babyData$smoker<-factor(babyData$smoker, labels=c('Non-smoker','Smoker')) # Проверка на мультиколлинеарность attach(babyData) pairs(~Birthweight+Gestation+mheight+mppwt, main='Birth weight scatterplots', col=c('red','blue')[smoker], pch=c(1,4)[smoker]) round(cor(cbind(Birthweight,Gestation,mppwt,mheight)),2) # Строим регрессионную модель на основе независимых предикторов reg1<-lm(Birthweight~Gestation+smoker+mppwt) summary(reg1) # Проверка предположений plot(reg1) # Можно построить все графики сразу, # а можно каждый по отдельности. plot(reg1, which = 1) plot(reg1, which = 2) hist(resid(reg1), xlim = range(c(-2.5,2.5)), main='Histogram of residuals', xlab='Standardised residuals',ylab='Frequency') ## Считаем VIF library(car) vif(reg1) # Но: vif(lm(Birthweight~Gestation+smoker+mheight+mppwt)) # Или с помощью пакета usdm library(usdm) independents <- data.frame(cbind(Gestation,smoker,mppwt)) vif(independents) ## Проверяем на выбросы (рычаг и остатки) plot(reg1, which = 5) ## Автокорреляция остатков: тест Дурбина-Ватсона # library(car) dwt(reg1)
/lectures/07/scripts/regression_mult.R
no_license
dkhramov/iad_2020
R
false
false
1,552
r
babyData = read.csv('week_09/data/birthweight_reduced.csv') head(babyData, n=3) dim(babyData) babyData = na.omit(babyData); dim(babyData) class(babyData$smoker) babyData$smoker<-factor(babyData$smoker, labels=c('Non-smoker','Smoker')) # Проверка на мультиколлинеарность attach(babyData) pairs(~Birthweight+Gestation+mheight+mppwt, main='Birth weight scatterplots', col=c('red','blue')[smoker], pch=c(1,4)[smoker]) round(cor(cbind(Birthweight,Gestation,mppwt,mheight)),2) # Строим регрессионную модель на основе независимых предикторов reg1<-lm(Birthweight~Gestation+smoker+mppwt) summary(reg1) # Проверка предположений plot(reg1) # Можно построить все графики сразу, # а можно каждый по отдельности. plot(reg1, which = 1) plot(reg1, which = 2) hist(resid(reg1), xlim = range(c(-2.5,2.5)), main='Histogram of residuals', xlab='Standardised residuals',ylab='Frequency') ## Считаем VIF library(car) vif(reg1) # Но: vif(lm(Birthweight~Gestation+smoker+mheight+mppwt)) # Или с помощью пакета usdm library(usdm) independents <- data.frame(cbind(Gestation,smoker,mppwt)) vif(independents) ## Проверяем на выбросы (рычаг и остатки) plot(reg1, which = 5) ## Автокорреляция остатков: тест Дурбина-Ватсона # library(car) dwt(reg1)
\name{gridData} \alias{gridData} \title{ Apply a grid (2D bins) to nodiv objects of type 'points' } \description{ Takes an object of type \code{distrib_data} or \code{nodiv_data}, and puts the point distributions on a grid. Can also be used for resampling grid data at a coarser scale. If the comm matrix has abundances (any one value larger than 1), they are summed across points in the grid cell. } \usage{ gridData(dist_data, cellsize_x = 1, cellsize_y = cellsize_x, xll_corner, yll_corner) } \arguments{ \item{dist_data}{ An object of type \code{distrib_data} or \code{nodiv_data} } \item{cellsize_x}{ The size of the bins along the X (longitudinal) axis, in units of the spatial coordinates of the original points } \item{cellsize_y}{ The size of the bins along the Y (latitudinal) axis, in units of the spatial coordinates of the original points } \item{xll_corner}{ The x coordinate of the lower left corner of the lower left grid cell of the intended grid. The corner must be left of all points in the data set } \item{yll_corner}{ The y coordinate of the lower left corner of the lower left grid cell of the intended grid. The corner must be below all points in the data set } } \value{ The return value is the original \code{dist_data} object with gridded distribution data } \author{ Michael Krabbe Borregaard } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ \code{\link{distrib_data}}, ~~~ } \examples{ data(coquettes) new <- gridData(coquettes, 2) plot_richness(new) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ ~kwd1 } \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
/man/gridData.Rd
no_license
barnabywalker/nodiv
R
false
false
1,692
rd
\name{gridData} \alias{gridData} \title{ Apply a grid (2D bins) to nodiv objects of type 'points' } \description{ Takes an object of type \code{distrib_data} or \code{nodiv_data}, and puts the point distributions on a grid. Can also be used for resampling grid data at a coarser scale. If the comm matrix has abundances (any one value larger than 1), they are summed across points in the grid cell. } \usage{ gridData(dist_data, cellsize_x = 1, cellsize_y = cellsize_x, xll_corner, yll_corner) } \arguments{ \item{dist_data}{ An object of type \code{distrib_data} or \code{nodiv_data} } \item{cellsize_x}{ The size of the bins along the X (longitudinal) axis, in units of the spatial coordinates of the original points } \item{cellsize_y}{ The size of the bins along the Y (latitudinal) axis, in units of the spatial coordinates of the original points } \item{xll_corner}{ The x coordinate of the lower left corner of the lower left grid cell of the intended grid. The corner must be left of all points in the data set } \item{yll_corner}{ The y coordinate of the lower left corner of the lower left grid cell of the intended grid. The corner must be below all points in the data set } } \value{ The return value is the original \code{dist_data} object with gridded distribution data } \author{ Michael Krabbe Borregaard } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ \code{\link{distrib_data}}, ~~~ } \examples{ data(coquettes) new <- gridData(coquettes, 2) plot_richness(new) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ ~kwd1 } \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.cec.R \name{plot.cec} \alias{plot.cec} \title{Plot CEC Objects} \usage{ \method{plot}{cec}( x, col, cex = 0.5, pch = 19, cex.centers = 1, pch.centers = 8, ellipses = TRUE, ellipses.lwd = 4, ellipses.lty = 2, model = TRUE, xlab, ylab, ... ) } \arguments{ \item{x}{A \code{\link{cec}} object resulting from the \code{\link{cec}} function.} \item{col}{A specification for the default plotting color of the points in the clusters. See \code{\link{par}} for more details.} \item{cex}{A numerical value giving the amount by which plotting text and symbols should be magnified relative to the default. See \code{\link{par}} for more details.} \item{pch}{Either an integer specifying a symbol or a single character to be used as the default in plotting points. See \code{\link{par}} for more details.} \item{cex.centers}{The same as \code{cex}, except that it applies only to the centers' means.} \item{pch.centers}{The same as \code{pch}, except that it applies only to the centers' means.} \item{ellipses}{If this parameter is TRUE, covariance ellipses will be drawn.} \item{ellipses.lwd}{The line width of the covariance ellipses. See \code{lwd} in \code{\link{par}} for more details.} \item{ellipses.lty}{The line type of the covariance ellipses. See \code{lty} in \code{\link{par}} for more details.} \item{model}{If this parameter is TRUE, the model (expected) covariance will be used for each cluster instead of the sample covariance (MLE) of the points in the cluster, when drawing the covariance ellipses.} \item{xlab}{A label for the x axis. See \link{plot} for more details.} \item{ylab}{A label for the y axis. See \link{plot} for more details.} \item{...}{Additional arguments passed to \code{plot} when drawing data points.} } \value{ This function returns nothing. } \description{ \code{plot.cec} presents the results from the \code{\link{cec}} function in the form of a plot. 
The colors of the data points represent the cluster they belong to. Ellipses are drawn to represent the covariance (of either the model or the sample) of each cluster. } \examples{ ## See the examples provided with the cec() function. } \seealso{ \code{\link{cec}}, \code{\link{print.cec}} } \keyword{hplot}
/man/plot.cec.Rd
no_license
cran/CEC
R
false
true
2,326
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.cec.R \name{plot.cec} \alias{plot.cec} \title{Plot CEC Objects} \usage{ \method{plot}{cec}( x, col, cex = 0.5, pch = 19, cex.centers = 1, pch.centers = 8, ellipses = TRUE, ellipses.lwd = 4, ellipses.lty = 2, model = TRUE, xlab, ylab, ... ) } \arguments{ \item{x}{A \code{\link{cec}} object resulting from the \code{\link{cec}} function.} \item{col}{A specification for the default plotting color of the points in the clusters. See \code{\link{par}} for more details.} \item{cex}{A numerical value giving the amount by which plotting text and symbols should be magnified relative to the default. See \code{\link{par}} for more details.} \item{pch}{Either an integer specifying a symbol or a single character to be used as the default in plotting points. See \code{\link{par}} for more details.} \item{cex.centers}{The same as \code{cex}, except that it applies only to the centers' means.} \item{pch.centers}{The same as \code{pch}, except that it applies only to the centers' means.} \item{ellipses}{If this parameter is TRUE, covariance ellipses will be drawn.} \item{ellipses.lwd}{The line width of the covariance ellipses. See \code{lwd} in \code{\link{par}} for more details.} \item{ellipses.lty}{The line type of the covariance ellipses. See \code{lty} in \code{\link{par}} for more details.} \item{model}{If this parameter is TRUE, the model (expected) covariance will be used for each cluster instead of the sample covariance (MLE) of the points in the cluster, when drawing the covariance ellipses.} \item{xlab}{A label for the x axis. See \link{plot} for more details.} \item{ylab}{A label for the y axis. See \link{plot} for more details.} \item{...}{Additional arguments passed to \code{plot} when drawing data points.} } \value{ This function returns nothing. } \description{ \code{plot.cec} presents the results from the \code{\link{cec}} function in the form of a plot. 
The colors of the data points represent the cluster they belong to. Ellipses are drawn to represent the covariance (of either the model or the sample) of each cluster. } \examples{ ## See the examples provided with the cec() function. } \seealso{ \code{\link{cec}}, \code{\link{print.cec}} } \keyword{hplot}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rabbitmq.R \name{rabbitmq_parse_uri} \alias{rabbitmq_parse_uri} \title{parse the RabbiMQ URI.} \usage{ rabbitmq_parse_uri(uri, prefix = "", port = 15672) } \arguments{ \item{uri}{the amqp URI} \item{prefix}{the prefix that the RabbitMQ managmenet interface uses} \item{port}{the port for rabbitmq managment interface} } \value{ a list that contains the url to the mangement interface, username password and vhost. } \description{ This will parse the uri into smaller pieces that can be used to talk to the rest endpoint for RabbitMQ. }
/base/remote/man/rabbitmq_parse_uri.Rd
permissive
PecanProject/pecan
R
false
true
616
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rabbitmq.R \name{rabbitmq_parse_uri} \alias{rabbitmq_parse_uri} \title{parse the RabbiMQ URI.} \usage{ rabbitmq_parse_uri(uri, prefix = "", port = 15672) } \arguments{ \item{uri}{the amqp URI} \item{prefix}{the prefix that the RabbitMQ managmenet interface uses} \item{port}{the port for rabbitmq managment interface} } \value{ a list that contains the url to the mangement interface, username password and vhost. } \description{ This will parse the uri into smaller pieces that can be used to talk to the rest endpoint for RabbitMQ. }
# Present graphically the relationship between per capita A and marginal alpha effects. # This provides intuitions for the allocation problem across planners. # Tested out graphing structure here: https://fanwangecon.github.io/R4Econ/tabgraph/ggscatter/htmlpdfr/fs_ggscatter_3cts_mdisc.html # The code/loop structure follows AllocateR\alloc_discrete_fun_R\fs_mpc_tables_increments_202103.R # fs_mpc_tables_increments_202103.R generates the A and alpha inputs needed here. try(dev.off(dev.list()["RStudioGD"]),silent=TRUE) try(dev.off(),silent=TRUE) library(tidyverse) library(REconTools) # library(PrjOptiAlloc) library(forcats) library(foreach) library(doParallel) # it_no_cores <- detectCores(logical = TRUE) # cl <- makeCluster(5) # registerDoParallel(cl) # Number of ways to cut income bins ls_it_income_cuts <- c(1,2,3,4) # Types of allocation files to consider ls_st_file_suffix_trumpchk <- c('snwx_trumpchk_moredense_a65zh266zs5_b1_xi0_manna_168_bt95', 'snwx_trumpchk_moredense_a65zh266zs5_b1_xi0_manna_168_bt60', 'snwx_trumpchk_moredense_a65zh266zs5_b1_xi0_manna_168_married', 'snwx_trumpchk_moredense_a65zh266zs5_b1_xi0_manna_168_unmarried', 'snwx_trumpchk_moredense_a65zh266zs5_b1_xi0_manna_168') ls_st_file_suffix_trumpchk <- rev(ls_st_file_suffix_trumpchk) ls_st_file_suffix_bidenchk <- c('snwx_bidenchk_moredense_a65zh266zs5_b1_xi0_manna_168_bt95', 'snwx_bidenchk_moredense_a65zh266zs5_b1_xi0_manna_168_bt60', 'snwx_bidenchk_moredense_a65zh266zs5_b1_xi0_manna_168_married', 'snwx_bidenchk_moredense_a65zh266zs5_b1_xi0_manna_168_unmarried', 'snwx_bidenchk_moredense_a65zh266zs5_b1_xi0_manna_168') ls_st_file_suffix_bidenchk <- rev(ls_st_file_suffix_bidenchk) ls_st_file_suffix_bchklock <- c('snwx_bchklock_moredense_a65zh266zs5_b1_xi0_manna_168_bt95', 'snwx_bchklock_moredense_a65zh266zs5_b1_xi0_manna_168_bt60', 'snwx_bchklock_moredense_a65zh266zs5_b1_xi0_manna_168_married', 'snwx_bchklock_moredense_a65zh266zs5_b1_xi0_manna_168_unmarried', 
'snwx_bchklock_moredense_a65zh266zs5_b1_xi0_manna_168') ls_st_file_suffix_bchklock <- rev(ls_st_file_suffix_bchklock) bl_per_capita <- TRUE fl_rho <- 1 # list to run ls_st_file_suffix <- c(ls_st_file_suffix_bidenchk, ls_st_file_suffix_trumpchk, ls_st_file_suffix_bchklock) # ls_st_file_suffix <- c(ls_st_file_suffix_trumpchk, ls_st_file_suffix_bidenchk, # ls_st_file_suffix_bidenchk_betaedu, ls_st_file_suffix_trumpchk_betaedu) # ls_st_file_suffix_test <- "snwx_bidenchk_moredense_a65zh266zs5_b1_xi0_manna_168" # ls_st_file_suffix <- c(ls_st_file_suffix_test) for (it_income_cuts in ls_it_income_cuts) { if (it_income_cuts == 1) { # 20k interval, between 0 and 100k and 100 million # generate income cut-offs fl_bin_start <- 0 # width equal to 20,000 fl_bin_width <- 2e4 # final point is 100 million fl_bin_final_end <- 1e8 # final segment starting point is 100,000 dollars fl_bin_final_start <- 1e5 # File name st_snm_suffix_save <- '_0t100k20kbin' } else if (it_income_cuts == 2) { # 10k interval, between 0 and 150k and 100 million # generate income cut-offs fl_bin_start <- 0 # width equal to 20,000 fl_bin_width <- 1e4 # final point is 100 million fl_bin_final_end <- 1e8 # final segment starting point is 100,000 dollars fl_bin_final_start <- 1.5e5 # File name st_snm_suffix_save <- '_0t150k10kbin' } else if (it_income_cuts == 3) { # 10k interval, between 0 and 200k and 100 million # generate income cut-offs fl_bin_start <- 0 # width equal to 20,000 fl_bin_width <- 1e4 # final point is 100 million fl_bin_final_end <- 1e8 # final segment starting point is 100,000 dollars fl_bin_final_start <- 2e5 # File name st_snm_suffix_save <- '_0t200k10kbin' } else if (it_income_cuts == 4) { # 5k interval, between 0 and 200k and 100 million # generate income cut-offs fl_bin_start <- 0 # width equal to 20,000 fl_bin_width <- 5e3 # final point is 100 million fl_bin_final_end <- 1e8 # final segment starting point is 100,000 dollars fl_bin_final_start <- 2e5 # File name st_snm_suffix_save <- 
'_0t200k5kbin' } ar_income_bins <- c(seq(fl_bin_start, fl_bin_final_start, by=fl_bin_width), fl_bin_final_end) # Solve iteratively for (st_which_solu in ls_st_file_suffix) { # Files: # source('fs_opti_support.R') # st_which_solu <- 'b1_manna' # st_which_solu <- paste0('b1_xi0_manna_88_', st_file_suffix) ls_output <- fs_opti_support_202103(st_which_solu, bl_per_capita=bl_per_capita, fl_rho=fl_rho) st_b0b1 <- ls_output$st_b0b1 st_file_type_withspouse_shock <- ls_output$st_file_type_withspouse_shock st_file_type_withspouse_shock <- ls_output$st_file_type_withspouse_shock snm_simu_csv_withspouse_shock <- ls_output$snm_simu_csv_withspouse_shock srt_simu_path <- ls_output$srt_simu_path srt_csv_path_root <- ls_output$srt_csv_path_root srt_imgcsv_mpcapc_root <- ls_output$srt_imgcsv_mpcapc_root ar_rho <- ls_output$ar_rho bl_save_img <- TRUE ### Variable Names and Paths ## Common Parameters Across # Max Phase Out given 1200*2 + 500*4 = 4400 fl_max_phaseout = 200000 it_bin_dollar_before_phaseout = 500 # Dollar Per Check fl_percheck_dollar = 100 # Meaning of Ymin Ymax simulated interval of 1 fl_multiple = 58056 # Number of Max Checks it_max_checks_1st = 44 it_max_checks_2nd = 88 # Number of Tax Paying Households fl_tax_hh = 128580000 # Number of Income Groups to Use: use 25 for 10,000 = 1 # Age Conditions # Two age group considerations for (bl_cap_x_axis in c(TRUE, FALSE)) { for (bl_log_x_axis in c(TRUE, FALSE)) { for (it_age_type in c(1, 2)) { if (it_age_type == 1) { it_max_age = 64 it_min_age = 18 } if (it_age_type == 2) { it_max_age = 99 it_min_age = 18 } st_img_suf_age_ybin <- paste0(it_min_age, 't', it_max_age) for (MPC_type in c(1)) { snm_save_csv <- "" if (MPC_type == 1 | MPC_type == 3) { if (MPC_type == 1) { snm_save_csv <- 'mpc_smooth' } if (MPC_type == 3) { snm_save_csv <- 'apc_smooth' } } if (MPC_type == 2 | MPC_type == 4) { if (MPC_type == 2) { snm_save_csv <- 'mpc_raw' } if (MPC_type == 4) { snm_save_csv <- 'apc_raw' } } # File name final construction 
snm_save_csv_file = paste0(snm_save_csv, '_bykidsmarital20k_allchecks_', st_img_suf_age_ybin, st_snm_suffix_save) # Load File df_MPC_results <- as_tibble( read.csv(file.path(srt_imgcsv_mpcapc_root, paste0(snm_save_csv_file, '.csv')), header=TRUE)) # summarize # REconTools::ff_summ_percentiles(df_MPC_results, bl_statsasrows = FALSE) # Gather the needed data df_MPC_results_A_alpha <- df_MPC_results %>% select(X, marital, kids, ymin_group, mass, c_avg_chk0_usd, X0) %>% mutate(A = c_avg_chk0_usd/(1 + marital + kids), alpha = X0) # The continuous variables are A, alpha, mass. The categorical variables are kids and marital. # Modifying dataframe ------ # Marital and Kids Level Labeling marry_levels <- c(Single = "0", Married = "1") kids_levels <- c("0" = "0", "1" = "1", "2" = "2", "3" = "3", "4" = "4") # ## Unique Kids Count # it_kids_marital_unique_n <- dim(as.matrix(df_alloc_combined # %>% group_by(kids, marital) %>% summarize(freq=n())))[1] ## Select, as factor, and recode df_alloc_use <- df_MPC_results_A_alpha %>% mutate(kids = as.numeric(kids)) %>% # filter(kids <= 2) %>% mutate(kids = as.factor(kids), marital = as.factor(marital)) %>% mutate(kids = fct_recode(kids, !!!kids_levels), marital = fct_recode(marital, !!!marry_levels)) # Get value for minimum and maximum income levels for each bin df_alloc_use <- df_alloc_use %>% rowwise() %>% mutate(y_group_min = substring(strsplit(ymin_group, ",")[[1]][1], 2), y_group_max = gsub(strsplit(ymin_group, ",")[[1]][2], pattern = "]", replacement = "")) %>% ungroup() # To avoid discontinuity of the highest income bin, >200k for all if (bl_cap_x_axis) { df_alloc_use <- df_alloc_use %>% mutate(y_group_max = as.numeric(y_group_max)) %>% filter(y_group_max <= 10) } # Generate factors df_alloc_use <- df_alloc_use %>% mutate(ymin_group = as.factor(ymin_group), kids = as.factor(kids), marital = as.factor(marital)) # Transform scale df_alloc_use <- df_alloc_use %>% mutate(mass = mass*100, alpha = alpha*100) if (bl_log_x_axis) { 
df_alloc_use <- df_alloc_use %>% mutate(A = log(A)) } # Use R4Econ Scatter Code Template # Graphing plt_mtcars_scatter <- ggplot(df_alloc_use, aes(x=A, y=alpha)) + geom_jitter(aes(size=mass, colour=kids, shape=marital), width = 0.15) + # geom_smooth(span = 0.50, se=FALSE) + theme(text = element_text(size = 16)) # Color controls # ar_st_colors <- c("#33cc33", "#F8766D") # ar_st_colors_label <- c("v-shaped", "straight") fl_legend_color_symbol_size <- 5 st_leg_color_lab <- "Number of children" # Shape controls # ar_it_shapes <- c(9, 15) # ar_st_shapes_label <- c("auto", "manuel") fl_legend_shape_symbol_size <- 5 st_leg_shape_lab <- "Marital status" # Control scatter point size fl_min_size <- 1.25 fl_max_size <- 12 ar_size_range <- c(fl_min_size, fl_max_size) st_leg_size_lab <- "Population share (%)" # Labeling # st_title <- paste0('Distribution of HP and QSEC from mtcars') # st_subtitle <- paste0('https://fanwangecon.github.io/', # 'R4Econ/tabgraph/ggscatter/htmlpdfr/fs_ggscatter_3cts_mdisc.html') # st_caption <- paste0('mtcars dataset, ', # 'https://fanwangecon.github.io/R4Econ/') if (bl_log_x_axis) { st_x_label <- 'Log average consumption per household member before receiving stimulus checks' } else { st_x_label <- 'Average consumption per household member before receiving stimulus checks' } st_y_label <- 'MPC out of first $100 in stimulus checks (percent)' # Add titles and labels plt_mtcars_scatter <- plt_mtcars_scatter + labs(x = st_x_label, y = st_y_label) # Color, shape and size controls plt_mtcars_scatter <- plt_mtcars_scatter + # scale_colour_manual(values=ar_st_colors, labels=ar_st_colors_label) + # scale_shape_manual(values=ar_it_shapes, labels=ar_st_shapes_label) + scale_size_continuous(range = ar_size_range) # replace the default labels for each legend segment plt_mtcars_scatter <- plt_mtcars_scatter + labs(colour = st_leg_color_lab, shape = st_leg_shape_lab, size = st_leg_size_lab) # Control the order of legend display # Show color, show shape, then show 
size. plt_mtcars_scatter <- plt_mtcars_scatter + guides( shape = guide_legend(order = 1, override.aes = list(size = fl_legend_shape_symbol_size)), colour = guide_legend(order = 2, override.aes = list(size = fl_legend_color_symbol_size)), size = guide_legend(order = 3)) # x-axis if (bl_log_x_axis) { print('do not label x-axis, not in level') } else { x.labels <- c('0', '25K', '50k', '75k', '100K', '125K', '150K') x.breaks <- c(0, 25000, 50000, 75000, 100000, 125000, 150000) # x-axis labeling plt_mtcars_scatter <- plt_mtcars_scatter + scale_x_continuous(labels = x.labels, breaks = x.breaks) } # Graph if (bl_save_img) { # Image names for savings if (bl_log_x_axis) { snm_save_csv_file <- paste0(snm_save_csv_file, '_logy') } if (bl_cap_x_axis) { snm_save_csv_file <- paste0(snm_save_csv_file, '_capx') } # add png snm_save_png <- paste0(snm_save_csv_file, '.png') # ggsave(plt_mtcars_scatter, file=file.path(srt_imgcsv_mpcapc_root, snm_save_png), width = 270, height = 216, units='mm', dpi = 300) } } } } } } } # stopCluster(cl)
/AllocateR/alloc_discrete_paper_graphs/fs_A_alpha_scatter.R
permissive
DynamicProgramming-StructuralEstimation/PrjOptiSNW
R
false
false
14,165
r
# Present graphically the relationship between per capita A and marginal alpha effects. # This provides intuitions for the allocation problem across planners. # Tested out graphing structure here: https://fanwangecon.github.io/R4Econ/tabgraph/ggscatter/htmlpdfr/fs_ggscatter_3cts_mdisc.html # The code/loop structure follows AllocateR\alloc_discrete_fun_R\fs_mpc_tables_increments_202103.R # fs_mpc_tables_increments_202103.R generates the A and alpha inputs needed here. try(dev.off(dev.list()["RStudioGD"]),silent=TRUE) try(dev.off(),silent=TRUE) library(tidyverse) library(REconTools) # library(PrjOptiAlloc) library(forcats) library(foreach) library(doParallel) # it_no_cores <- detectCores(logical = TRUE) # cl <- makeCluster(5) # registerDoParallel(cl) # Number of ways to cut income bins ls_it_income_cuts <- c(1,2,3,4) # Types of allocation files to consider ls_st_file_suffix_trumpchk <- c('snwx_trumpchk_moredense_a65zh266zs5_b1_xi0_manna_168_bt95', 'snwx_trumpchk_moredense_a65zh266zs5_b1_xi0_manna_168_bt60', 'snwx_trumpchk_moredense_a65zh266zs5_b1_xi0_manna_168_married', 'snwx_trumpchk_moredense_a65zh266zs5_b1_xi0_manna_168_unmarried', 'snwx_trumpchk_moredense_a65zh266zs5_b1_xi0_manna_168') ls_st_file_suffix_trumpchk <- rev(ls_st_file_suffix_trumpchk) ls_st_file_suffix_bidenchk <- c('snwx_bidenchk_moredense_a65zh266zs5_b1_xi0_manna_168_bt95', 'snwx_bidenchk_moredense_a65zh266zs5_b1_xi0_manna_168_bt60', 'snwx_bidenchk_moredense_a65zh266zs5_b1_xi0_manna_168_married', 'snwx_bidenchk_moredense_a65zh266zs5_b1_xi0_manna_168_unmarried', 'snwx_bidenchk_moredense_a65zh266zs5_b1_xi0_manna_168') ls_st_file_suffix_bidenchk <- rev(ls_st_file_suffix_bidenchk) ls_st_file_suffix_bchklock <- c('snwx_bchklock_moredense_a65zh266zs5_b1_xi0_manna_168_bt95', 'snwx_bchklock_moredense_a65zh266zs5_b1_xi0_manna_168_bt60', 'snwx_bchklock_moredense_a65zh266zs5_b1_xi0_manna_168_married', 'snwx_bchklock_moredense_a65zh266zs5_b1_xi0_manna_168_unmarried', 
'snwx_bchklock_moredense_a65zh266zs5_b1_xi0_manna_168') ls_st_file_suffix_bchklock <- rev(ls_st_file_suffix_bchklock) bl_per_capita <- TRUE fl_rho <- 1 # list to run ls_st_file_suffix <- c(ls_st_file_suffix_bidenchk, ls_st_file_suffix_trumpchk, ls_st_file_suffix_bchklock) # ls_st_file_suffix <- c(ls_st_file_suffix_trumpchk, ls_st_file_suffix_bidenchk, # ls_st_file_suffix_bidenchk_betaedu, ls_st_file_suffix_trumpchk_betaedu) # ls_st_file_suffix_test <- "snwx_bidenchk_moredense_a65zh266zs5_b1_xi0_manna_168" # ls_st_file_suffix <- c(ls_st_file_suffix_test) for (it_income_cuts in ls_it_income_cuts) { if (it_income_cuts == 1) { # 20k interval, between 0 and 100k and 100 million # generate income cut-offs fl_bin_start <- 0 # width equal to 20,000 fl_bin_width <- 2e4 # final point is 100 million fl_bin_final_end <- 1e8 # final segment starting point is 100,000 dollars fl_bin_final_start <- 1e5 # File name st_snm_suffix_save <- '_0t100k20kbin' } else if (it_income_cuts == 2) { # 10k interval, between 0 and 150k and 100 million # generate income cut-offs fl_bin_start <- 0 # width equal to 20,000 fl_bin_width <- 1e4 # final point is 100 million fl_bin_final_end <- 1e8 # final segment starting point is 100,000 dollars fl_bin_final_start <- 1.5e5 # File name st_snm_suffix_save <- '_0t150k10kbin' } else if (it_income_cuts == 3) { # 10k interval, between 0 and 200k and 100 million # generate income cut-offs fl_bin_start <- 0 # width equal to 20,000 fl_bin_width <- 1e4 # final point is 100 million fl_bin_final_end <- 1e8 # final segment starting point is 100,000 dollars fl_bin_final_start <- 2e5 # File name st_snm_suffix_save <- '_0t200k10kbin' } else if (it_income_cuts == 4) { # 5k interval, between 0 and 200k and 100 million # generate income cut-offs fl_bin_start <- 0 # width equal to 20,000 fl_bin_width <- 5e3 # final point is 100 million fl_bin_final_end <- 1e8 # final segment starting point is 100,000 dollars fl_bin_final_start <- 2e5 # File name st_snm_suffix_save <- 
'_0t200k5kbin' } ar_income_bins <- c(seq(fl_bin_start, fl_bin_final_start, by=fl_bin_width), fl_bin_final_end) # Solve iteratively for (st_which_solu in ls_st_file_suffix) { # Files: # source('fs_opti_support.R') # st_which_solu <- 'b1_manna' # st_which_solu <- paste0('b1_xi0_manna_88_', st_file_suffix) ls_output <- fs_opti_support_202103(st_which_solu, bl_per_capita=bl_per_capita, fl_rho=fl_rho) st_b0b1 <- ls_output$st_b0b1 st_file_type_withspouse_shock <- ls_output$st_file_type_withspouse_shock st_file_type_withspouse_shock <- ls_output$st_file_type_withspouse_shock snm_simu_csv_withspouse_shock <- ls_output$snm_simu_csv_withspouse_shock srt_simu_path <- ls_output$srt_simu_path srt_csv_path_root <- ls_output$srt_csv_path_root srt_imgcsv_mpcapc_root <- ls_output$srt_imgcsv_mpcapc_root ar_rho <- ls_output$ar_rho bl_save_img <- TRUE ### Variable Names and Paths ## Common Parameters Across # Max Phase Out given 1200*2 + 500*4 = 4400 fl_max_phaseout = 200000 it_bin_dollar_before_phaseout = 500 # Dollar Per Check fl_percheck_dollar = 100 # Meaning of Ymin Ymax simulated interval of 1 fl_multiple = 58056 # Number of Max Checks it_max_checks_1st = 44 it_max_checks_2nd = 88 # Number of Tax Paying Households fl_tax_hh = 128580000 # Number of Income Groups to Use: use 25 for 10,000 = 1 # Age Conditions # Two age group considerations for (bl_cap_x_axis in c(TRUE, FALSE)) { for (bl_log_x_axis in c(TRUE, FALSE)) { for (it_age_type in c(1, 2)) { if (it_age_type == 1) { it_max_age = 64 it_min_age = 18 } if (it_age_type == 2) { it_max_age = 99 it_min_age = 18 } st_img_suf_age_ybin <- paste0(it_min_age, 't', it_max_age) for (MPC_type in c(1)) { snm_save_csv <- "" if (MPC_type == 1 | MPC_type == 3) { if (MPC_type == 1) { snm_save_csv <- 'mpc_smooth' } if (MPC_type == 3) { snm_save_csv <- 'apc_smooth' } } if (MPC_type == 2 | MPC_type == 4) { if (MPC_type == 2) { snm_save_csv <- 'mpc_raw' } if (MPC_type == 4) { snm_save_csv <- 'apc_raw' } } # File name final construction 
snm_save_csv_file = paste0(snm_save_csv, '_bykidsmarital20k_allchecks_', st_img_suf_age_ybin, st_snm_suffix_save) # Load File df_MPC_results <- as_tibble( read.csv(file.path(srt_imgcsv_mpcapc_root, paste0(snm_save_csv_file, '.csv')), header=TRUE)) # summarize # REconTools::ff_summ_percentiles(df_MPC_results, bl_statsasrows = FALSE) # Gather the needed data df_MPC_results_A_alpha <- df_MPC_results %>% select(X, marital, kids, ymin_group, mass, c_avg_chk0_usd, X0) %>% mutate(A = c_avg_chk0_usd/(1 + marital + kids), alpha = X0) # The continuous variables are A, alpha, mass. The categorical variables are kids and marital. # Modifying dataframe ------ # Marital and Kids Level Labeling marry_levels <- c(Single = "0", Married = "1") kids_levels <- c("0" = "0", "1" = "1", "2" = "2", "3" = "3", "4" = "4") # ## Unique Kids Count # it_kids_marital_unique_n <- dim(as.matrix(df_alloc_combined # %>% group_by(kids, marital) %>% summarize(freq=n())))[1] ## Select, as factor, and recode df_alloc_use <- df_MPC_results_A_alpha %>% mutate(kids = as.numeric(kids)) %>% # filter(kids <= 2) %>% mutate(kids = as.factor(kids), marital = as.factor(marital)) %>% mutate(kids = fct_recode(kids, !!!kids_levels), marital = fct_recode(marital, !!!marry_levels)) # Get value for minimum and maximum income levels for each bin df_alloc_use <- df_alloc_use %>% rowwise() %>% mutate(y_group_min = substring(strsplit(ymin_group, ",")[[1]][1], 2), y_group_max = gsub(strsplit(ymin_group, ",")[[1]][2], pattern = "]", replacement = "")) %>% ungroup() # To avoid discontinuity of the highest income bin, >200k for all if (bl_cap_x_axis) { df_alloc_use <- df_alloc_use %>% mutate(y_group_max = as.numeric(y_group_max)) %>% filter(y_group_max <= 10) } # Generate factors df_alloc_use <- df_alloc_use %>% mutate(ymin_group = as.factor(ymin_group), kids = as.factor(kids), marital = as.factor(marital)) # Transform scale df_alloc_use <- df_alloc_use %>% mutate(mass = mass*100, alpha = alpha*100) if (bl_log_x_axis) { 
df_alloc_use <- df_alloc_use %>% mutate(A = log(A)) } # Use R4Econ Scatter Code Template # Graphing plt_mtcars_scatter <- ggplot(df_alloc_use, aes(x=A, y=alpha)) + geom_jitter(aes(size=mass, colour=kids, shape=marital), width = 0.15) + # geom_smooth(span = 0.50, se=FALSE) + theme(text = element_text(size = 16)) # Color controls # ar_st_colors <- c("#33cc33", "#F8766D") # ar_st_colors_label <- c("v-shaped", "straight") fl_legend_color_symbol_size <- 5 st_leg_color_lab <- "Number of children" # Shape controls # ar_it_shapes <- c(9, 15) # ar_st_shapes_label <- c("auto", "manuel") fl_legend_shape_symbol_size <- 5 st_leg_shape_lab <- "Marital status" # Control scatter point size fl_min_size <- 1.25 fl_max_size <- 12 ar_size_range <- c(fl_min_size, fl_max_size) st_leg_size_lab <- "Population share (%)" # Labeling # st_title <- paste0('Distribution of HP and QSEC from mtcars') # st_subtitle <- paste0('https://fanwangecon.github.io/', # 'R4Econ/tabgraph/ggscatter/htmlpdfr/fs_ggscatter_3cts_mdisc.html') # st_caption <- paste0('mtcars dataset, ', # 'https://fanwangecon.github.io/R4Econ/') if (bl_log_x_axis) { st_x_label <- 'Log average consumption per household member before receiving stimulus checks' } else { st_x_label <- 'Average consumption per household member before receiving stimulus checks' } st_y_label <- 'MPC out of first $100 in stimulus checks (percent)' # Add titles and labels plt_mtcars_scatter <- plt_mtcars_scatter + labs(x = st_x_label, y = st_y_label) # Color, shape and size controls plt_mtcars_scatter <- plt_mtcars_scatter + # scale_colour_manual(values=ar_st_colors, labels=ar_st_colors_label) + # scale_shape_manual(values=ar_it_shapes, labels=ar_st_shapes_label) + scale_size_continuous(range = ar_size_range) # replace the default labels for each legend segment plt_mtcars_scatter <- plt_mtcars_scatter + labs(colour = st_leg_color_lab, shape = st_leg_shape_lab, size = st_leg_size_lab) # Control the order of legend display # Show color, show shape, then show 
size. plt_mtcars_scatter <- plt_mtcars_scatter + guides( shape = guide_legend(order = 1, override.aes = list(size = fl_legend_shape_symbol_size)), colour = guide_legend(order = 2, override.aes = list(size = fl_legend_color_symbol_size)), size = guide_legend(order = 3)) # x-axis if (bl_log_x_axis) { print('do not label x-axis, not in level') } else { x.labels <- c('0', '25K', '50k', '75k', '100K', '125K', '150K') x.breaks <- c(0, 25000, 50000, 75000, 100000, 125000, 150000) # x-axis labeling plt_mtcars_scatter <- plt_mtcars_scatter + scale_x_continuous(labels = x.labels, breaks = x.breaks) } # Graph if (bl_save_img) { # Image names for savings if (bl_log_x_axis) { snm_save_csv_file <- paste0(snm_save_csv_file, '_logy') } if (bl_cap_x_axis) { snm_save_csv_file <- paste0(snm_save_csv_file, '_capx') } # add png snm_save_png <- paste0(snm_save_csv_file, '.png') # ggsave(plt_mtcars_scatter, file=file.path(srt_imgcsv_mpcapc_root, snm_save_png), width = 270, height = 216, units='mm', dpi = 300) } } } } } } } # stopCluster(cl)
# Step-1 # Identify the Problem Statement, What are you trying to solve? ##"Predict the rating of (movie/web series/etc.) based on its characteristics" # Step-2 # Identify the Target variable, What value will be predicted? ##"Rating" # Step-3 #Loading the raw Data InputData=read.csv(choose.files(),na.strings=c("","","NA","null")) View(InputData) #out of the 16 columns, which one is the target variable? #Rating column ##start looking at the data from different aspects ##Now lets understand out of 15 columns which are useful and which aren't. # Step-4 # Exploring the dataset str(InputData) head(InputData,10) ###Title: ## continuous variable #names of anime releases,all are different name. #Just the name of a anime doesn't drive the rating of it #it is not helpful from the business perspective ###Media type ##categorical variable #it helps to understand in which media type anime is popular #so media type can affect the popularity of anime. #so we dont reject this column in the first phase of filteration. ###episodes ##continuous variable #it tells the number of episodes the anime have #it can help to predict the rating. #so dont reject this column in the first phase of filteration. ### duration ##continuous variable #it tells about duration. #if the duration is less many people get interest to watch,so it can help to predict the rating #so dont reject this column in the first phase of filteration. 
###ongoing ##categorical variable #consists of yes/no values table(InputData$ongoing) #no-6942,yes-87 #it has very less value of yes compare to no #so it doesn't help in the prediction of rating ### sz of release ##categorical variable #consits of: #Fall,spring,summer,winter table(InputData$sznOfRelease) #fall-613,spring-645,summer-424,winter-431 #it has too many missing value,so it cant help in the prediction of rating ###description ##continuous variable # it tells about the synoposis of plot,all the descriptions are different #it doesn't help in the prediction of rating #so reject this column. ###studios ##continuous variable #see how many unique variables are there length(InputData$studios) #7029 #all are different studios,it doesnt derive in the prediction of rating ###tags ##continuous variable #it is representing the tags,genre i.e category of artistic,music etc.of the anime #it can't help to predict the rating of it ###content warn ##continuous variable # it is telling about the contents of the anime, #it doesn't derive to predict the rating. #they are not helpful from buisness prespective. ###watched ##continuous variable #it tells the number of people who completed watching it. #it would help in prediction of rating. ###watching ##continuous variable #it tells the number of people are still watching the shows. #it would help in prediction of rating. ###want watch ##continuous variable #it tells the number of people want to watch the shows. #it would help in prediction of rating. ###dropped ##continuous variable #it tells the number of people have dropped before the completion of shows.. #it would help in prediction of rating. ###votes ##continuous variable #it would help in prediction of rating. # Removing useless columns InputData[, c("title", "sznOfRelease", "ongoing", "description", "studios","tags","contentWarn")] = NULL head(InputData,10) # Step-5 # Whether it is a Regression problem or Classification? 
# The target ("rating") is continuous -> this is a regression problem.

# Step 6: Check and treat missing values.
colSums(is.na(InputData))

# Mode of a vector: returns the name(s) of the most frequent level(s).
# Used to impute missing values in categorical columns.
FunctionMode=function(inpData){
  ModeValue=names(table(inpData)[table(inpData)==max(table(inpData))])
  return(ModeValue)
}

# Find the mode of mediaType (used for the imputation below).
FunctionMode(InputData$mediaType)

# Impute missing categorical values with the mode ("TV").
InputData$mediaType[is.na(InputData$mediaType)] = "TV"
colSums(is.na(InputData))

# Impute missing continuous values with the median (robust to the
# outliers treated below); watched is rounded to a whole count.
InputData$watched[is.na(InputData$watched)] = round(median(InputData$watched,na.rm=TRUE),digits=0)
InputData$duration[is.na(InputData$duration)] =median(InputData$duration,na.rm=TRUE)
colSums(is.na(InputData))

# Outlier treatment: for each right-skewed column, inspect candidate
# upper quantiles, pick a cutoff, and cap values above it.

# eps
boxplot(InputData$eps,horizontal=T)
# Very high values in the upper tail are outliers; inspect candidate
# cutoffs to find the last value not considered an outlier.
quantiles=quantile(InputData$eps,c(0.99,0.995,0.997,0.999,0.9993,0.9996))
quantiles
# Chosen cutoff: the 99.9th percentile; everything above it is replaced.
quantiles_final=quantile(InputData$eps,0.999)
quantiles_final
max(InputData$eps)
InputData$eps = ifelse(InputData$eps > quantiles_final , quantiles_final, InputData$eps)
# Re-check the boxplot to confirm the extreme values are gone.
boxplot(InputData$eps, horizontal = T)

# duration
boxplot(InputData$duration,horizontal=T)
# Very high values in the upper tail are outliers; inspect candidate
# cutoffs to find the last value not considered an outlier.
quantiles=quantile(InputData$duration,c(0.99,0.997,0.999,0.9993,0.9995,0.9997,0.9999))
quantiles
# Chosen cutoff: the 99.95th percentile; cap everything above it.
quantiles_final=quantile(InputData$duration,0.9995)
quantiles_final
max(InputData$duration)
InputData$duration = ifelse(InputData$duration > quantiles_final , quantiles_final, InputData$duration)
# Re-check the boxplot to confirm the extreme values are gone.
boxplot(InputData$duration, horizontal = T)

# watched
boxplot(InputData$watched,horizontal=T)
# Very high values in the upper tail are outliers; inspect candidate
# cutoffs (this column is more heavily skewed, so lower quantiles are
# examined).
quantiles=quantile(InputData$watched,c(0.95,0.96,0.963,0.965,0.97,0.98,0.99,0.995))
quantiles
# Chosen cutoff: the 96.3rd percentile; cap everything above it.
quantiles_final=quantile(InputData$watched,0.963)
quantiles_final
max(InputData$watched)
InputData$watched = ifelse(InputData$watched > quantiles_final , quantiles_final, InputData$watched)
# Re-check the boxplot to confirm the extreme values are gone.
boxplot(InputData$watched, horizontal = T)

# watching
boxplot(InputData$watching,horizontal=T)
# Very high values in the upper tail are outliers; inspect candidate
# cutoffs to find the last value not considered an outlier.
quantiles=quantile(InputData$watching,c(0.95,0.96,0.963,0.965,0.97,0.98,0.99,0.995))
quantiles
# Chosen cutoff: the 96.3rd percentile; cap everything above it.
quantiles_final=quantile(InputData$watching,0.963)
quantiles_final
max(InputData$watching)
InputData$watching = ifelse(InputData$watching > quantiles_final , quantiles_final, InputData$watching)
# Re-check the boxplot to confirm the extreme values are gone.
boxplot(InputData$watching, horizontal = T)

# wantWatch
boxplot(InputData$wantWatch,horizontal=T)
# Very high values in the upper tail are outliers; inspect candidate
# cutoffs to find the last value not considered an outlier.
quantiles=quantile(InputData$wantWatch,c(0.99,0.995,0.997,0.999,0.9993,0.9995,0.9997,0.9999))
quantiles
# Chosen cutoff: the 99.97th percentile; cap everything above it.
quantiles_final=quantile(InputData$wantWatch,0.9997)
quantiles_final
max(InputData$wantWatch)
InputData$wantWatch = ifelse(InputData$wantWatch > quantiles_final , quantiles_final, InputData$wantWatch)
# Re-check the boxplot to confirm the extreme values are gone.
boxplot(InputData$wantWatch, horizontal = T)

# dropped
boxplot(InputData$dropped,horizontal=T)
# Very high values in the upper tail are outliers; inspect candidate
# cutoffs to find the last value not considered an outlier.
quantiles=quantile(InputData$dropped,c(0.995,0.997,0.999,0.9993,0.9995,0.9997,0.9999))
quantiles
# Chosen cutoff: the 99.7th percentile; cap everything above it.
quantiles_final=quantile(InputData$dropped,0.997)
quantiles_final
max(InputData$dropped)
InputData$dropped = ifelse(InputData$dropped > quantiles_final , quantiles_final, InputData$dropped)
# Re-check the boxplot to confirm the extreme values are gone.
boxplot(InputData$dropped, horizontal = T)

# rating (the target): no extreme upper values in the boxplot, so no
# outlier treatment is needed.
boxplot(InputData$rating,horizontal=T)

# votes
boxplot(InputData$votes,horizontal=T)
# Very high values in the upper tail are outliers; inspect candidate
# cutoffs to find the last value not considered an outlier.
quantiles=quantile(InputData$votes,c(0.93,0.935,0.94,0.943,0.945,0.947,0.95,0.953))
quantiles
# Chosen cutoff: the 94th percentile; cap everything above it.
quantiles_final=quantile(InputData$votes,0.94)
quantiles_final
max(InputData$votes)
InputData$votes = ifelse(InputData$votes > quantiles_final , quantiles_final, InputData$votes)
# Re-check the boxplot to confirm the extreme values are gone.
boxplot(InputData$votes, horizontal = T)

# Step 7: Univariate analysis of each potential predictor --
# histograms for continuous columns, bar plots for categorical ones.
ColsForHist=c("eps","duration","watched","watching","wantWatch","dropped", "votes","rating")
# 2x4 grid so all eight histograms share one plot window.
par(mfrow=c(2,4))
# Library providing professional colour palettes.
library(RColorBrewer)
for (contCol in ColsForHist){
  hist(InputData[,c(contCol)], main=paste('Histogram of:', contCol), col=brewer.pal(8,"Paired"))
}

# Bar plots for the categorical columns.
ColsForBar=c("mediaType")
par(mfrow=c(1,1))
for (catCol in ColsForBar){
  barplot(table(InputData[,c(catCol)]), main=paste('Barplot of:', catCol), col=brewer.pal(8,"Paired"))
}

# Step 8: Bivariate analysis -- visual relationship between predictors
# and the target. For regression:
# - continuous vs continuous -> scatter plot
# - continuous vs categorical -> box plot

# Scatter-plot matrix of all continuous columns at once.
ContinuousCols = c("rating","eps","duration","watched","watching","wantWatch","dropped", "votes")
par(mfrow=c(1,1))
plot(InputData[, ContinuousCols], col='blue')

# Box plots of rating by each categorical column.
CategoricalCols = c("mediaType")
library(RColorBrewer)
for (bar_cols in CategoricalCols){
  boxplot(rating~ (InputData[,c(bar_cols)]), data = InputData, main=paste('Box plot of:',bar_cols),col=brewer.pal(8,"Paired"))
}

# Step 9: Strength of relationship between predictors and the target.
# - continuous vs continuous -> correlation test
# - continuous vs categorical -> ANOVA test
ContinuousCols =
c("rating","eps","duration","watched","watching","wantWatch","dropped", "votes")
# FIX(review): the original listed "votes_new" here, but that column is
# only created later (in the log-transformation section), so subsetting
# InputData on it at this point fails with "undefined columns selected".
# Step 9 operates on the raw "votes" column; the votes_new re-run of the
# whole pipeline appears further down.
cor(InputData[, ContinuousCols], use = "complete.obs")
CorrData=cor(InputData[, ContinuousCols], use = "complete.obs")
CorrData

# Keep the continuous predictors whose absolute correlation with the
# target exceeds 0.4.
names(CorrData[,'rating'][abs(CorrData[,'rating'])>0.4])
# Selected as good variables: watched, wantWatch, watching, votes.

# Continuous vs categorical strength: one-way ANOVA.
# H0: the variables are NOT correlated.
# Small p-value -> reject H0 (variables are correlated);
# large p-value -> fail to reject H0.
# Only one categorical column remains, so no loop is needed.
summary(aov(rating ~ InputData$mediaType, data = InputData))
# mediaType is kept based on the ANOVA result (p-value < 0.05).

#########################################################################
# Step 10: Generate the data frame for machine learning.
TargetVariableName=c('rating')
# Predictors chosen from the exploratory analysis above.
BestPredictorName= c("mediaType","watched","watching","wantWatch","votes")
# Extract target and predictors into a generic modelling dataset.
TargetVariable=InputData[, c(TargetVariableName)]
str(TargetVariable)
PredictorVariable=InputData[, BestPredictorName]
str(PredictorVariable)
DataForML=data.frame(TargetVariable,PredictorVariable)
head(DataForML)

#########################################################################
# Step 11: Sampling -- 70% training / 30% testing split (seeded so the
# split is reproducible).
set.seed(123)
TrainingSampleIndex=sample(1:nrow(DataForML), size=0.7 * nrow(DataForML) )
DataForMLTrain=DataForML[TrainingSampleIndex, ]
DataForMLTest=DataForML[-TrainingSampleIndex, ]
dim(DataForMLTrain)
dim(DataForMLTest)
head(DataForMLTrain)
head(DataForMLTest)

# Baseline predictive model: linear regression on the training data.
Model_Reg=lm(TargetVariable~.,data=DataForMLTrain)
summary(Model_Reg)
# Multiple R-squared 0.441, adjusted R-squared 0.4398 -> the fit is weak.

# Check accuracy of the model on the testing data.
DataForMLTest$Pred_LM=predict(Model_Reg, DataForMLTest)
head(DataForMLTest)
# Absolute percentage error (APE) for each prediction.
DataForMLTest$LM_APE= 100 *(abs(DataForMLTest$TargetVariable-DataForMLTest$Pred_LM)/DataForMLTest$TargetVariable)
head(DataForMLTest)
MeanAPE=mean(DataForMLTest$LM_APE)
MedianAPE=median(DataForMLTest$LM_APE)
print(paste('### Mean Accuracy of Linear Regression Model is: ', 100 - MeanAPE))
print(paste('### Median Accuracy of Linear Regression Model is: ', 100 - MedianAPE))

# Multicollinearity test via variance inflation factors (car::vif).
Model_Reg=lm(TargetVariable~.,data=DataForMLTrain)
summary(Model_Reg)
library(car)
VIF=vif(Model_Reg)
data.frame(VIF)
# The model fit is still poor (low R-squared), so try transforming a
# predictor.

########################################################################
# Additional data transformations: a log transform may reduce skewness
# and increase correlation with the target; it is the most common choice.
hist(InputData$votes)
cor(x=InputData$votes , y=InputData$rating)
min(InputData$votes)
# Zeros must be treated first: log(0) is -Inf.
log(0)
# votes = the number of people who voted; apart from 0, the minimum no.
# of people who voted is 1, so recode zeros to 1 before taking logs.
InputData$votes[InputData$votes==0]=1

# Log transformation.
hist(log(InputData$votes))
cor(x=log(InputData$votes) , y=InputData$rating)
# Store the transformed column as votes_new and use it for the model
# instead of votes.
InputData$votes_new=log(InputData$votes)
head(InputData,10)

# Re-inspect distributions, now with votes_new.
ColsForHist=c("eps","duration","watched","watching","wantWatch","dropped", "votes_new","rating")
# 2x4 grid so all eight histograms share one plot window.
par(mfrow=c(2,4))
library(RColorBrewer)
for (contCol in ColsForHist){
  hist(InputData[,c(contCol)], main=paste('Histogram of:', contCol), col=brewer.pal(8,"Paired"))
}
ContinuousCols = c("rating","watched","watching","wantWatch","votes_new")
par(mfrow=c(1,1))
plot(InputData[, ContinuousCols], col='blue')

# Re-run the whole modelling pipeline using votes_new instead of votes.
TargetVariableName=c('rating')
BestPredictorName= c("mediaType","watched","watching","wantWatch","votes_new")
TargetVariable=InputData[, c(TargetVariableName)]
str(TargetVariable)
PredictorVariable=InputData[, BestPredictorName]
str(PredictorVariable)
DataForML=data.frame(TargetVariable,PredictorVariable)
head(DataForML)

# Same seeded 70/30 split as before.
set.seed(123)
TrainingSampleIndex=sample(1:nrow(DataForML), size=0.7 * nrow(DataForML) )
DataForMLTrain=DataForML[TrainingSampleIndex, ]
DataForMLTest=DataForML[-TrainingSampleIndex, ]
dim(DataForMLTrain)
dim(DataForMLTest)
head(DataForMLTrain)
head(DataForMLTest)

# Full model, then a variant spelling out the mediaType indicator terms
# explicitly (reference level: "DVD Special").
Model_Reg=lm(TargetVariable~.,data=DataForMLTrain)
summary(Model_Reg)
Model_Reg_1=lm(TargetVariable~watched+watching+wantWatch+votes_new+ I(mediaType=="Movie")+I(mediaType=="Other")+I(mediaType=="OVA")+I(mediaType=="TV") +I(mediaType=="TV
Special")+I(mediaType=="Web"),data=DataForMLTrain)
summary(Model_Reg_1)

# Reading the coefficient estimates:
# - Continuous predictors: a positive estimate means rating rises as the
#   predictor rises; a negative estimate means rating falls as it rises.
#   (FIX(review): the original comment said negative estimates suggest a
#   "positive relationship", which is wrong.)
# - wantWatch has a positive estimate; watched has a negative one
#   (negative relationship between watched and rating).
#   NOTE(review): the original claimed a 1-unit rise in wantWatch raises
#   rating by 5 units -- verify this against the summary output before
#   quoting it.
# - Categorical indicators are read against the reference level
#   ("DVD Special"): a positive mediaType coefficient (e.g. Movie) means
#   a higher expected rating than DVD Special; a negative one
#   (e.g. Other) means a lower expected rating. The remaining levels
#   read the same way from their coefficient signs.

# Check accuracy of the transformed model on the testing data.
DataForMLTest$Pred_LM=predict(Model_Reg_1, DataForMLTest)
head(DataForMLTest)
# Absolute percentage error (APE) for each prediction.
DataForMLTest$LM_APE= 100 *(abs(DataForMLTest$TargetVariable-DataForMLTest$Pred_LM)/DataForMLTest$TargetVariable)
head(DataForMLTest)
MeanAPE=mean(DataForMLTest$LM_APE)
MedianAPE=median(DataForMLTest$LM_APE)
print(paste('### Mean Accuracy of Linear Regression Model is: ', 100 - MeanAPE))
print(paste('### Median Accuracy of Linear Regression Model is: ', 100 - MedianAPE))
# Recorded run output. FIX(review): these were bare string literals
# pasted from the console (which auto-print when the script is sourced);
# kept here as comments instead:
# "### Mean Accuracy of Linear Regression Model is: 80.3654391740629"
# "### Median Accuracy of Linear Regression Model is: 87.441303065507"

# Multicollinearity test on the transformed model.
# FIX(review): the original used `==` (comparison) instead of `=`
# (assignment) here, so Model_Reg_1 was never refitted and the statement
# attempted an elementwise comparison against an lm object.
Model_Reg_1=lm(TargetVariable~watched+watching+wantWatch+votes_new+ I(mediaType=="Movie")+I(mediaType=="Other")+I(mediaType=="OVA")+I(mediaType=="TV") +I(mediaType=="TV Special")+I(mediaType=="Web"),data=DataForMLTrain)
summary(Model_Reg_1)
library(car)
VIF=vif(Model_Reg_1)
data.frame(VIF)

# Refit (unchanged formula) before reporting the final fit statistics.
Model_Reg_1=lm(TargetVariable~watched+watching+wantWatch+votes_new+ I(mediaType=="Movie")+I(mediaType=="Other")+I(mediaType=="OVA")+I(mediaType=="TV")
+I(mediaType=="TV Special")+I(mediaType=="Web"),data=DataForMLTrain)
summary(Model_Reg_1)
# Multiple R-squared 0.5657, adjusted R-squared 0.5648 (between 0.5 and
# 1): transforming votes noticeably improved the model fit compared with
# the baseline.

########################################################################
########################################################################
########################################################################

### Business recommendations (interpretation of the fitted model) ###
# - Releases on the Movie and TV Special platforms are associated with
#   higher ratings, so prefer those release channels.
# - Web and OVA releases are associated with lower ratings; the original
#   author's interpretation is that audiences less familiar with those
#   technologies (e.g. older viewers) are underrepresented there --
#   NOTE(review): this causal explanation is speculation, not something
#   the model establishes.
# - Overall, most viewers appear to prefer Movie and TV Special releases.
/anime.R
no_license
karankatyal456/Anime-Rating-Prediction
R
false
false
20,623
r
# Step-1 # Identify the Problem Statement, What are you trying to solve? ##"Predict the rating of (movie/web series/etc.) based on its characteristics" # Step-2 # Identify the Target variable, What value will be predicted? ##"Rating" # Step-3 #Loading the raw Data InputData=read.csv(choose.files(),na.strings=c("","","NA","null")) View(InputData) #out of the 16 columns, which one is the target variable? #Rating column ##start looking at the data from different aspects ##Now lets understand out of 15 columns which are useful and which aren't. # Step-4 # Exploring the dataset str(InputData) head(InputData,10) ###Title: ## continuous variable #names of anime releases,all are different name. #Just the name of a anime doesn't drive the rating of it #it is not helpful from the business perspective ###Media type ##categorical variable #it helps to understand in which media type anime is popular #so media type can affect the popularity of anime. #so we dont reject this column in the first phase of filteration. ###episodes ##continuous variable #it tells the number of episodes the anime have #it can help to predict the rating. #so dont reject this column in the first phase of filteration. ### duration ##continuous variable #it tells about duration. #if the duration is less many people get interest to watch,so it can help to predict the rating #so dont reject this column in the first phase of filteration. 
###ongoing ##categorical variable #consists of yes/no values table(InputData$ongoing) #no-6942,yes-87 #it has very less value of yes compare to no #so it doesn't help in the prediction of rating ### sz of release ##categorical variable #consits of: #Fall,spring,summer,winter table(InputData$sznOfRelease) #fall-613,spring-645,summer-424,winter-431 #it has too many missing value,so it cant help in the prediction of rating ###description ##continuous variable # it tells about the synoposis of plot,all the descriptions are different #it doesn't help in the prediction of rating #so reject this column. ###studios ##continuous variable #see how many unique variables are there length(InputData$studios) #7029 #all are different studios,it doesnt derive in the prediction of rating ###tags ##continuous variable #it is representing the tags,genre i.e category of artistic,music etc.of the anime #it can't help to predict the rating of it ###content warn ##continuous variable # it is telling about the contents of the anime, #it doesn't derive to predict the rating. #they are not helpful from buisness prespective. ###watched ##continuous variable #it tells the number of people who completed watching it. #it would help in prediction of rating. ###watching ##continuous variable #it tells the number of people are still watching the shows. #it would help in prediction of rating. ###want watch ##continuous variable #it tells the number of people want to watch the shows. #it would help in prediction of rating. ###dropped ##continuous variable #it tells the number of people have dropped before the completion of shows.. #it would help in prediction of rating. ###votes ##continuous variable #it would help in prediction of rating. # Removing useless columns InputData[, c("title", "sznOfRelease", "ongoing", "description", "studios","tags","contentWarn")] = NULL head(InputData,10) # Step-5 # Whether it is a Regression problem or Classification? 
# Target variable "rating" is continuous -> this is a regression problem.

# Step-6: Checking and treating missing values
colSums(is.na(InputData))

# Statistical mode of a vector.
# Returns the most frequent value(s); ties return all tied values as a
# character vector (names of the frequency table).
FunctionMode <- function(inpData) {
  freq <- table(inpData)
  names(freq)[freq == max(freq)]
}

# Mode of mediaType (used below to impute its missing values)
FunctionMode(InputData$mediaType)

# Impute missing values for the categorical column with its mode ("TV")
InputData$mediaType[is.na(InputData$mediaType)] <- "TV"

# Checking missing values after imputation
colSums(is.na(InputData))

# Impute missing values for continuous columns with the median
# (median is robust to the outliers still present at this stage)
InputData$watched[is.na(InputData$watched)] <-
  round(median(InputData$watched, na.rm = TRUE), digits = 0)
InputData$duration[is.na(InputData$duration)] <-
  median(InputData$duration, na.rm = TRUE)

# Checking missing values after imputation
colSums(is.na(InputData))

### Treating the outliers ###
### Inspect which columns need outlier treatment ###

### eps (number of episodes)
boxplot(InputData$eps, horizontal = TRUE)
# A few very high values at the upper end are outliers.
# Inspect high quantiles to pick a capping threshold: the last value we are
# NOT willing to treat as an outlier becomes the cap.
quantiles <- quantile(InputData$eps,
                      c(0.99, 0.995, 0.997, 0.999, 0.9993, 0.9996))
quantiles
# Everything above the chosen quantile is replaced (capped) by it
quantiles_final <- quantile(InputData$eps, 0.999)
quantiles_final
max(InputData$eps)
InputData$eps <- ifelse(InputData$eps > quantiles_final,
                        quantiles_final, InputData$eps)
# Re-check the boxplot to confirm the outliers were capped.
boxplot(InputData$eps, horizontal = T) ###duration boxplot(InputData$duration,horizontal=T) #here, there are some very high values in the end which are outliers ##see what is last value in the data, which you won't consider as the outlier quantiles=quantile(InputData$duration,c(0.99,0.997,0.999,0.9993,0.9995,0.9997,0.9999)) quantiles #check which quantile it approx corresponds to #and all the values above that will be replaced quantiles_final=quantile(InputData$duration,0.9995) quantiles_final max(InputData$duration) InputData$duration = ifelse(InputData$duration > quantiles_final , quantiles_final, InputData$duration) #check the boxplot again and see whether outliers are removed or not. boxplot(InputData$duration, horizontal = T) ###watched boxplot(InputData$watched,horizontal=T) #here, there are some very high values in the end which are outliers ##see what is last value in the data, which you won't consider as the outlier quantiles=quantile(InputData$watched,c(0.95,0.96,0.963,0.965,0.97,0.98,0.99,0.995)) quantiles #check which quantile it approx corresponds to #and all the values above that will be replaced quantiles_final=quantile(InputData$watched,0.963) quantiles_final max(InputData$watched) InputData$watched = ifelse(InputData$watched > quantiles_final , quantiles_final, InputData$watched) #check the boxplot again and see whether outliers are removed or not. 
boxplot(InputData$watched, horizontal = T) ###watching boxplot(InputData$watching,horizontal=T) #here, there are some very high values in the end which are outliers ##see what is last value in the data, which you won't consider as the outlier quantiles=quantile(InputData$watching,c(0.95,0.96,0.963,0.965,0.97,0.98,0.99,0.995)) quantiles #check which quantile it approx corresponds to #and all the values above that will be replaced quantiles_final=quantile(InputData$watching,0.963) quantiles_final max(InputData$watching) InputData$watching = ifelse(InputData$watching > quantiles_final , quantiles_final, InputData$watching) #check the boxplot again and see whether outliers are removed or not. boxplot(InputData$watching, horizontal = T) ###wantwatch boxplot(InputData$wantWatch,horizontal=T) #here, there are some very high values in the end which are outliers ##see what is last value in the data, which you won't consider as the outlier quantiles=quantile(InputData$wantWatch,c(0.99,0.995,0.997,0.999,0.9993,0.9995,0.9997,0.9999)) quantiles #check which quantile it approx corresponds to #and all the values above that will be replaced quantiles_final=quantile(InputData$wantWatch,0.9997) quantiles_final max(InputData$wantWatch) InputData$wantWatch = ifelse(InputData$wantWatch > quantiles_final , quantiles_final, InputData$wantWatch) #check the boxplot again and see whether outliers are removed or not. 
boxplot(InputData$wantWatch, horizontal = T) ###dropped boxplot(InputData$dropped,horizontal=T) #here, there are some very high values in the end which are outliers ##see what is last value in the data, which you won't consider as the outlier quantiles=quantile(InputData$dropped,c(0.995,0.997,0.999,0.9993,0.9995,0.9997,0.9999)) quantiles #check which quantile it approx corresponds to #and all the values above that will be replaced quantiles_final=quantile(InputData$dropped,0.997) quantiles_final max(InputData$dropped) InputData$dropped = ifelse(InputData$dropped > quantiles_final , quantiles_final, InputData$dropped) #check the boxplot again and see whether outliers are removed or not. boxplot(InputData$dropped, horizontal = T) ###rating boxplot(InputData$rating,horizontal=T) ##it doesnt have any higher value. #it doesnt need to treat outliers. ###votes boxplot(InputData$votes,horizontal=T) #here, there are some very high values in the end which are outliers ##see what is last value in the data, which you won't consider as the outlier quantiles=quantile(InputData$votes,c(0.93,0.935,0.94,0.943,0.945,0.947,0.95,0.953)) quantiles #check which quantile it approx corresponds to #and all the values above that will be replaced quantiles_final=quantile(InputData$votes,0.94) quantiles_final max(InputData$votes) InputData$votes = ifelse(InputData$votes > quantiles_final , quantiles_final, InputData$votes) #check the boxplot again and see whether outliers are removed or not. 
boxplot(InputData$votes, horizontal = T) # Step-7 # Explore each "Potential" predictor for distribution and Quality ##Univariate analysis ##continuous column- histogram ##categorical column- bar plot # Exploring MULTIPLE CONTINUOUS features ColsForHist=c("eps","duration","watched","watching","wantWatch","dropped", "votes","rating") #Splitting the plot window par(mfrow=c(2,4)) # library to generate professional colors library(RColorBrewer) # looping to create the histograms for each column for (contCol in ColsForHist){ hist(InputData[,c(contCol)], main=paste('Histogram of:', contCol), col=brewer.pal(8,"Paired")) } ############################################################ # Exploring MULTIPLE CATEGORICAL features ColsForBar=c("mediaType") par(mfrow=c(1,1)) # looping to create the Bar-Plots for each column for (catCol in ColsForBar){ barplot(table(InputData[,c(catCol)]), main=paste('Barplot of:', catCol), col=brewer.pal(8,"Paired")) } # Step-8 ##bivariate analysis # Visual Relationship between predictors and target variable ##Regression- 2 scenarios # Continuous Vs Continuous ---- Scatter Plot # Continuous Vs Categorical --- Box Plot # Continuous Vs Continuous --- Scatter plot # For multiple columns at once ContinuousCols = c("rating","eps","duration","watched","watching","wantWatch","dropped", "votes") par(mfrow=c(1,1)) plot(InputData[, ContinuousCols], col='blue') # Continuous Vs Categorical Visual analysis: Boxplot CategoricalCols = c("mediaType") library(RColorBrewer) for (bar_cols in CategoricalCols){ boxplot(rating~ (InputData[,c(bar_cols)]), data = InputData, main=paste('Box plot of:',bar_cols),col=brewer.pal(8,"Paired")) } # Step-9 # Strength of Relationship between predictor and target variable # Continuous Vs Continuous ---- Correlation test # Continuous Vs Categorical---- ANOVA test # Continuous Vs Continuous : Correlation analysis # Correlation for multiple columns at once ContinuousCols = 
c("rating", "eps", "duration", "watched", "watching", "wantWatch", "dropped",
  "votes")
# BUGFIX: the original listed "votes_new" here, but that column is only
# created later (in the log-transform step), so cor() would fail with
# "undefined columns selected" at this point in the script. Use "votes".

cor(InputData[, ContinuousCols], use = "complete.obs")
CorrData <- cor(InputData[, ContinuousCols], use = "complete.obs")
CorrData

# Final columns with high correlation (|r| > 0.4) with the target variable
names(CorrData[, "rating"][abs(CorrData[, "rating"]) > 0.4])

# Selected continuous columns for the model:
# watched, wantWatch, watching and votes all correlate well with rating.

# Continuous Vs Categorical correlation strength: ANOVA
# H0: variables are NOT correlated
# Small p-value -> variables are correlated (H0 rejected)
# Large p-value -> variables are NOT correlated (H0 accepted)
# Only one categorical column remains, so no loop is needed.
summary(aov(rating ~ InputData$mediaType, data = InputData))

# Selected categorical columns based on ANOVA (p-value < 0.05): "mediaType"

#########################################################################
#########################################################################
# Step-10: Generating the data frame for machine learning
TargetVariableName <- c("rating")

# Predictors chosen from the exploratory data analysis above
BestPredictorName <- c("mediaType", "watched", "watching", "wantWatch", "votes")

# Extract target and predictor variables into a generic modelling dataset
TargetVariable <- InputData[, c(TargetVariableName)]
str(TargetVariable)

# All chosen predictor columns apart from the target variable
PredictorVariable <- InputData[, BestPredictorName]
str(PredictorVariable)

DataForML <- data.frame(TargetVariable, PredictorVariable)
head(DataForML)

#########################################################################
# Step-11: Sampling | splitting data into 70% training / 30% testing
set.seed(123)
TrainingSampleIndex <- sample(seq_len(nrow(DataForML)),
                              size = 0.7 * nrow(DataForML))
DataForMLTrain=DataForML[TrainingSampleIndex, ] DataForMLTest=DataForML[-TrainingSampleIndex, ] dim(DataForMLTrain) dim(DataForMLTest) head(DataForMLTrain) head(DataForMLTest) # Creating Predictive models on training data to check the accuracy of each algorithm ###### Linear Regression ####### Model_Reg=lm(TargetVariable~.,data=DataForMLTrain) summary(Model_Reg) #multiple r-squared-0.441,adjusted r-sq-0.4398 #the fitness of model is not good. # Checking Accuracy of model on Testing data DataForMLTest$Pred_LM=predict(Model_Reg, DataForMLTest) head(DataForMLTest) # Calculating the Absolute Percentage Error for each prediction DataForMLTest$LM_APE= 100 *(abs(DataForMLTest$TargetVariable-DataForMLTest$Pred_LM)/DataForMLTest$TargetVariable) head(DataForMLTest) MeanAPE=mean(DataForMLTest$LM_APE) MedianAPE=median(DataForMLTest$LM_APE) print(paste('### Mean Accuracy of Linear Regression Model is: ', 100 - MeanAPE)) print(paste('### Median Accuracy of Linear Regression Model is: ', 100 - MedianAPE)) ####test for multicollinearity##### Model_Reg=lm(TargetVariable~.,data=DataForMLTrain) summary(Model_Reg) library(car) VIF=vif(Model_Reg) data.frame(VIF) #the fitness of model is not good as here value of r square is very low. ######################################################################## # Additional Data Transformations #might reduce skewness and increase the correlation with the target variable # logarithmic is the most common one hist(InputData$votes) cor(x=InputData$votes , y=InputData$rating) min(InputData$votes) #Treating the Zeros in the Columns log(0) #Votes- the number of people voted #apart from 0, minimum no. 
of people voted =1 InputData$votes[InputData$votes==0]=1 # Log Transformation hist(log(InputData$votes)) cor(x=log(InputData$votes) , y=InputData$rating) #so create a new column votes_new inside inputdata and use it for the model #instead of votes InputData$votes_new=log(InputData$votes) head(InputData,10) ColsForHist=c("eps","duration","watched","watching","wantWatch","dropped", "votes_new","rating") #Splitting the plot window par(mfrow=c(2,4)) # library to generate professional colors library(RColorBrewer) # looping to create the histograms for each column for (contCol in ColsForHist){ hist(InputData[,c(contCol)], main=paste('Histogram of:', contCol), col=brewer.pal(8,"Paired")) } ContinuousCols = c("rating","watched","watching","wantWatch","votes_new") par(mfrow=c(1,1)) plot(InputData[, ContinuousCols], col='blue') #run the whole model and use votes_new instead of votes. TargetVariableName=c('rating') # Choosing multiple Predictors which may have relation with Target Variable # Based on the exploratory data analysis BestPredictorName= c("mediaType","watched","watching","wantWatch","votes_new") # Extracting Target and predictor variables from data to create a generic dataset TargetVariable=InputData[, c(TargetVariableName)] str(TargetVariable) # Selecting all other columns as Predictors apart from target variable PredictorVariable=InputData[, BestPredictorName] str(PredictorVariable) DataForML=data.frame(TargetVariable,PredictorVariable) head(DataForML) set.seed(123) TrainingSampleIndex=sample(1:nrow(DataForML), size=0.7 * nrow(DataForML) ) DataForMLTrain=DataForML[TrainingSampleIndex, ] DataForMLTest=DataForML[-TrainingSampleIndex, ] dim(DataForMLTrain) dim(DataForMLTest) head(DataForMLTrain) head(DataForMLTest) Model_Reg=lm(TargetVariable~.,data=DataForMLTrain) summary(Model_Reg) Model_Reg_1=lm(TargetVariable~watched+watching+wantWatch+votes_new+ I(mediaType=="Movie")+I(mediaType=="Other")+I(mediaType=="OVA")+I(mediaType=="TV") +I(mediaType=="TV 
Special") + I(mediaType == "Web"), data = DataForMLTrain)
summary(Model_Reg_1)

# Coefficient interpretation
# Continuous predictors:
#   positive estimate -> rating rises as the predictor rises
#   negative estimate -> rating falls as the predictor rises
#   (BUGFIX in comments: the original said "positive relationship" for
#   negative estimates, which is wrong)
#   e.g. wantWatch has a positive estimate -> positive relationship with rating
#   e.g. watched has a negative estimate  -> negative relationship with rating
# Categorical predictor (mediaType), reference category "DVD Special":
#   mediaTypeMovie positive -> movies tend to be rated higher than DVD Special
#   mediaTypeOther negative -> "Other" tends to be rated lower than DVD Special
#   the remaining levels are read the same way from the sign of their estimate.

# Checking accuracy of the model on the testing data
DataForMLTest$Pred_LM <- predict(Model_Reg_1, DataForMLTest)
head(DataForMLTest)

# Absolute Percentage Error for each prediction
DataForMLTest$LM_APE <- 100 *
  (abs(DataForMLTest$TargetVariable - DataForMLTest$Pred_LM) /
     DataForMLTest$TargetVariable)
head(DataForMLTest)

MeanAPE <- mean(DataForMLTest$LM_APE)
MedianAPE <- median(DataForMLTest$LM_APE)
print(paste('### Mean Accuracy of Linear Regression Model is: ', 100 - MeanAPE))
print(paste('### Median Accuracy of Linear Regression Model is: ', 100 - MedianAPE))
# Observed output (kept as a comment; the original left these as bare
# string literals, which are no-op statements):
#   "### Mean Accuracy of Linear Regression Model is:  80.3654391740629"
#   "### Median Accuracy of Linear Regression Model is:  87.441303065507"

#### Test for multicollinearity ####
# BUGFIX: the original used `==` (comparison) instead of `<-` (assignment);
# comparing an lm object with `==` errors, and the model was never assigned.
Model_Reg_1 <- lm(TargetVariable ~ watched + watching + wantWatch + votes_new +
                    I(mediaType == "Movie") + I(mediaType == "Other") +
                    I(mediaType == "OVA") + I(mediaType == "TV") +
                    I(mediaType == "TV Special") + I(mediaType == "Web"),
                  data = DataForMLTrain)
summary(Model_Reg_1)
library(car)
VIF <- vif(Model_Reg_1)
data.frame(VIF)

Model_Reg_1 <- lm(TargetVariable ~ watched + watching + wantWatch + votes_new +
                    I(mediaType == "Movie") + I(mediaType == "Other") +
                    I(mediaType == "OVA") + I(mediaType == "TV")
+I(mediaType=="TV Special")+I(mediaType=="Web"),data=DataForMLTrain) summary(Model_Reg_1) #multiple R-squared- 0.5657,Adjusted R-squared- 0.5648(lies between 0.5 to 1) # by transforming the variable and here it is seen that the fitness of model is good. ######################################################################## ######################################################################## ######################################################################## ###Business Recommendation#### #the anime should release more on the platform movie,TV special,in this platform they can get more ratings. #they should avoid to release on the platform like web,ova,as all are not so much familiar with the technologies. #specially the old people cant see the anime in web,ova. #most of the people preferred to movie,tv special.
library(tidyverse)
library(ieugwasr)

# LD-prune a set of SNPs with PLINK clumping.
#
# Command-line arguments:
#   1: path to an RDS file with summary statistics (must contain columns
#      `snp` and `p_value`)
#   2: p-value threshold for clumping
#   3: r^2 threshold for clumping
#   4: PLINK reference panel path (bfile prefix)
#   5: output file path (one retained rsid per line)
args <- commandArgs(trailingOnly = TRUE)
# Fail fast on a malformed invocation instead of erroring mid-pipeline.
stopifnot(length(args) >= 5)

X <- readRDS(args[1])
pval_thresh <- as.numeric(args[2])
r2_thresh <- as.numeric(args[3])
ref_path <- args[4]
out_file <- args[5]
# Guard against non-numeric thresholds silently becoming NA.
stopifnot(!is.na(pval_thresh), !is.na(r2_thresh))

# ld_clump() expects the columns to be named `rsid` and `pval`.
X <- X %>% rename(rsid = snp, pval = p_value)

# LD clumping via a local PLINK binary against the reference panel.
X_clump <- ld_clump(dat = X,
                    clump_r2 = r2_thresh,
                    clump_p = pval_thresh,
                    plink_bin = genetics.binaRies::get_plink_binary(),
                    bfile = ref_path)

# Write the retained SNP ids, one per line.
keep_snps <- X_clump$rsid
write_lines(keep_snps, file = out_file)
#saveRDS(keep_snps, file=out_file)
/pipeline_code/R/ld_prune_plink.R
no_license
CreRecombinase/cause
R
false
false
599
r
library(tidyverse)
library(ieugwasr)

# Parse positional command-line arguments:
# 1 = RDS file of summary stats, 2 = p-value cutoff, 3 = r^2 cutoff,
# 4 = PLINK bfile prefix, 5 = output path.
cli_args <- commandArgs(trailingOnly = TRUE)
sumstats <- readRDS(cli_args[1])
p_cutoff <- as.numeric(cli_args[2])
r2_cutoff <- as.numeric(cli_args[3])
bfile_prefix <- cli_args[4]
output_path <- cli_args[5]

# ld_clump() requires the columns to be called `rsid` and `pval`.
sumstats <- rename(sumstats, rsid = snp, pval = p_value)

# Clump SNPs in LD using a local PLINK binary and a reference panel.
clumped <- ld_clump(
  dat = sumstats,
  clump_r2 = r2_cutoff,
  clump_p = p_cutoff,
  plink_bin = genetics.binaRies::get_plink_binary(),
  bfile = bfile_prefix
)

# Retained SNP ids, written one per line.
retained <- clumped$rsid
write_lines(retained, file = output_path)
#saveRDS(keep_snps, file=out_file)
\name{NEWS} \title{News for Package \pkg{RcppArmadillo}} \newcommand{\ghpr}{\href{https://github.com/RcppCore/RcppArmadillo/pull/#1}{##1}} \newcommand{\ghit}{\href{https://github.com/RcppCore/RcppArmadillo/issues/#1}{##1}} \section{Changes in RcppArmadillo version 0.10.2.2.0 (2021-03-09)}{ \itemize{ \item Upgraded to Armadillo release 10.2.2 (Cicada Swarm) \itemize{ \item faster handling of subcubes \item added \code{tgamma()} \item added \code{.brief_print()} for abridged printing of matrices & cubes \item expanded forms of \code{trimatu()} and \code{trimatl()} with diagonal specification to handle sparse matrices \item expanded \code{eigs_sym()} and \code{eigs_gen()} with optional shift-invert mode } \item Removed \code{debian/} directory from repository as packaging is on salsa.debian.org. \item Relaxed tolerance on two \code{cube} tests on Windows to accomodate new 'gcc10-UCRT' builder. } } \section{Changes in RcppArmadillo version 0.10.2.1.0 (2021-02-09)}{ \itemize{ \item Upgraded to Armadillo release 10.2.1 (Cicada Swarm) \itemize{ \item faster handling of subcubes \item added \code{tgamma()} \item added \code{.brief_print()} for abridged printing of matrices & cubes \item expanded forms of \code{trimatu()} and \code{trimatl()} with diagonal specification to handle sparse matrices \item expanded \code{eigs_sym()} and \code{eigs_gen()} with optional shift-invert mode } } } \section{Changes in RcppArmadillo version 0.10.1.2.2 (2021-01-08)}{ \itemize{ \item Correct one unit test for \pkg{Matrix} 1.3.0-caused changed (Binxiang in \ghpr{319} and Dirk in \ghpr{322}). 
\item Suppress one further warning from \pkg{Matrix} (Dirk) \item Apply an upstream \code{NaN} correction (Conrad in \ghpr{321}) \item Added GitHub Actions CI using \code{run.sh} from r-ci (Dirk) } } \section{Changes in RcppArmadillo version 0.10.1.2.0 (2020-11-15)}{ \itemize{ \item Upgraded to Armadillo release 10.1.2 (Orchid Ambush) \item Remove three unused int constants (\ghit{313}) \item Include main armadillo header using quotes instead of brackets \item Rewrite version number use in old-school mode because gcc 4.8.5 \item Skipping parts of sparse conversion on Windows as win-builder fails } } \section{Changes in RcppArmadillo version 0.10.1.0.0 (2020-10-09)}{ \itemize{ \item Upgraded to Armadillo release 10.1.0 (Orchid Ambush) \itemize{ \item C++11 is now the minimum required C++ standard \item faster handling of compound expressions by \code{trimatu()} and \code{trimatl()} \item faster sparse matrix addition, subtraction and element-wise multiplication \item expanded sparse submatrix views to handle the non-contiguous form of \code{X.cols(vector_of_column_indices)} \item expanded \code{eigs_sym()} and \code{eigs_gen()} with optional fine-grained parameters (subspace dimension, number of iterations, eigenvalues closest to specified value) \item deprecated form of \code{reshape()} removed from Cube and SpMat classes \item ignore and warn on use of the \code{ARMA_DONT_USE_CXX11} macro } \item Switch Travis CI testing to focal and BSPM } } \section{Changes in RcppArmadillo version 0.9.900.3.0 (2020-09-02)}{ \itemize{ \item Upgraded to Armadillo release 9.900.3 (Nocturnal Misbehaviour) \itemize{ \item More efficient code for initialising matrices with \code{fill::zeros} \item Fixes for various error messages } } } \section{Changes in RcppArmadillo version 0.9.900.2.0 (2020-07-17)}{ \itemize{ \item Upgraded to Armadillo release 9.900.2 (Nocturnal Misbehaviour) \itemize{ \item In \code{sort()}, fixes for inconsistencies between checks applied to matrix and vector 
expressions \item In \code{sort()}, remove unnecessary copying when applied in-place to vectors } } } \section{Changes in RcppArmadillo version 0.9.900.1.0 (2020-06-08)}{ \itemize{ \item Upgraded to Armadillo release 9.900.1 (Nocturnal Misbehaviour) \itemize{ \item faster \code{solve()} for under/over-determined systems \item faster \code{eig_gen()} and \code{eig_pair()} for large matrices \item expanded \code{eig_gen()} and \code{eig_pair()} to optionally provide left and right eigenvectors } \item Switch Travis CI testing to R 4.0.0, use bionic as base distro and test R 3.6.3 and 4.0.0 in a matrix (Dirk in \ghpr{298}). \item Add two badges to README for indirect use and the CSDA paper. \item Adapt \code{RcppArmadillo.package.skeleton()} to a change in R 4.0.0 affecting what it exports in \code{NAMESPACE}. } } \section{Changes in RcppArmadillo version 0.9.880.1.0 (2020-05-15)}{ \itemize{ \item Upgraded to Armadillo release 9.880.1 (Roasted Mocha Detox) \itemize{ \item expanded \code{qr()} to optionally use pivoted decomposition \item updated physical constants to NIST 2018 CODATA values \item added \code{ARMA_DONT_USE_CXX11_MUTEX} configuration option to disable use of \code{std::mutex} } \item OpenMP capability is tested explicitly (Kevin Ushey and Dirk in \ghpr{294}, \ghpr{295}, and \ghpr{296} all fixing \ghit{290}).
} } \section{Changes in RcppArmadillo version 0.9.870.2.0 (2020-04-24)}{ \itemize{ \item Upgraded to Armadillo release 9.870.2 (Roasted Mocha Retox) \itemize{ \item faster handling of matrix multiplication expressions by \code{diagvec()} and \code{diagmat()} \item added \code{trimatu_ind()} and \code{trimatl_ind()} \item more consistent detection of sparse vector expressions } } } \section{Changes in RcppArmadillo version 0.9.860.2.0 (2020-04-13)}{ \itemize{ \item Upgraded to Armadillo release 9.860.2 (Roasted Mocha Fix) \itemize{ \item added \code{powmat()} \item faster access to columns in sparse submatrix views \item faster handling of relational expressions by \code{accu()} \item faster handling of sympd matrices by \code{expmat()}, \code{logmat()}, \code{sqrtmat()} \item workaround for save/load issues with HDF5 v1.12 } \item Vignettes are now pre-made and include (\ghpr{285}) \item Two test files are now skipped on 32-bit Windows } } \section{Changes in RcppArmadillo version 0.9.850.1.0 (2020-02-09)}{ \itemize{ \item Upgraded to Armadillo release 9.850.1 (Pyrocumulus Wrath) \itemize{ \item faster handling of compound expressions by \code{diagmat()} \item expanded \code{.save()} and \code{.load()} to handle CSV files with headers via csv_name(filename,header) specification \item added \code{log_normpdf()} \item added \code{.is_zero()} \item added \code{quantile()} } \item The sparse matrix test using scipy, if available, is now simplified thanks to recently added \CRANpkg{reticulate} conversions. 
} } \section{Changes in RcppArmadillo version 0.9.800.4.0 (2020-01-24)}{ \itemize{ \item Upgraded to Armadillo release 9.800.4 (Horizon Scraper) \itemize{ \item fixes for incorrect type promotion in \code{normpdf()} } } } \section{Changes in RcppArmadillo version 0.9.800.3.0 (2019-12-04)}{ \itemize{ \item Upgraded to Armadillo release 9.800.3 (Horizon Scraper) \itemize{ \item fixes for matrix row iterators \item better detection of non-hermitian matrices by \code{eig_sym()}, \code{inv_sympd()}, \code{chol()}, \code{expmat_sym()} } \item The \code{sample} function passes the prob vector as const allowing subsequent calls (Christian Gunning in \ghpr{276} fixing \ghit{275}) } } \section{Changes in RcppArmadillo version 0.9.800.1.0 (2019-10-09)}{ \itemize{ \item Upgraded to Armadillo release 9.800 (Horizon Scraper) \itemize{ \item faster \code{solve()} in default operation; iterative refinement is no longer applied by default; use \code{solve_opts::refine} to explicitly enable refinement \item faster \code{expmat()} \item faster handling of triangular matrices by \code{rcond()} \item added \code{.front()} and \code{.back()} \item added \code{.is_trimatu()} and \code{.is_trimatl()} \item added \code{.is_diagmat()} } \item The package now uses \pkg{tinytest} for unit tests (Dirk in \ghpr{269}). \item The \code{configure.ac} script is now more careful about shell portability (Min Kim in \ghpr{270}). 
} } \section{Changes in RcppArmadillo version 0.9.700.2.0 (2019-09-01)}{ \itemize{ \item Upgraded to Armadillo release 9.700.2 (Gangster Democracy) \itemize{ \item faster handling of cubes by \code{vectorise()} \item faster faster handling of sparse matrices by \code{nonzeros()} \item faster row-wise \code{index_min()} and \code{index_max()} \item expanded \code{join_rows()} and \code{join_cols()} to handle joining up to 4 matrices \item expanded \code{.save()} and \code{.load()} to allow storing sparse matrices in CSV format \item added \code{randperm()} to generate a vector with random permutation of a sequence of integers } \item Expanded the list of known good \code{gcc} and \code{clang} versions in \code{configure.ac} } } \section{Changes in RcppArmadillo version 0.9.600.4.0 (2019-07-14)}{ \itemize{ \item Upgraded to Armadillo release 9.600.4 (Napa Invasion) \itemize{ \item faster handling of sparse submatrices \item faster handling of sparse diagonal views \item faster handling of sparse matrices by \code{symmatu()} and \code{symmatl()} \item faster handling of sparse matrices by \code{join_cols()} \item expanded \code{clamp()} to handle sparse matrices \item added \code{.clean()} to replace elements below a threshold with zeros } } } \section{Changes in RcppArmadillo version 0.9.500.2.0 (2019-06-11)}{ \itemize{ \item Upgraded to Armadillo release 9.500.2 (Riot Compact) \itemize{ \item Expanded \code{solve()} with \code{solve_opts::likely_sympd} to indicate that the given matrix is likely positive definite \item more robust automatic detection of positive definite matrices by \code{solve()} and \code{inv()} \item faster handling of sparse submatrices \item expanded \code{eigs_sym()} to print a warning if the given matrix is not symmetric \item extended LAPACK function prototypes to follow Fortran passing conventions for so-called "hidden arguments", in order to address GCC Bug 90329; to use previous LAPACK function prototypes without the "hidden arguments", 
\code{#define ARMA_DONT_USE_FORTRAN_HIDDEN_ARGS before #include <armadillo> } } } } \section{Changes in RcppArmadillo version 0.9.400.3.0 (2019-05-09)}{ \itemize{ \item Upgraded to Armadillo release 9.400.3 (Surrogate Miscreant) \itemize{ \item check for symmetric / hermitian matrices (used by decomposition functions) has been made more robust \item \code{linspace()} and \code{logspace()} now honour requests for generation of vectors with zero elements \item fix for vectorisation / flattening of complex sparse matrices } } } \section{Changes in RcppArmadillo version 0.9.400.2.0 (2019-04-28)}{ \itemize{ \item Upgraded to Armadillo release 9.400.2 (Surrogate Miscreant) \itemize{ \item faster \code{cov()} and \code{cor()} \item added \code{.as_col()} and \code{.as_row()} \item expanded \code{.shed_rows()} / \code{.shed_cols()} / \code{.shed_slices()} to remove rows/columns/slices specified in a vector \item expanded \code{vectorise()} to handle sparse matrices \item expanded element-wise versions of \code{max()} and \code{min()} to handle sparse matrices \item optimised handling of sparse matrix expressions: \code{sparse \% (sparse +- scalar)} and \code{sparse / (sparse +- scalar)} \item expanded \code{eig_sym()}, \code{chol()}, \code{expmat_sym()}, \code{logmat_sympd()}, \code{sqrtmat_sympd()}, \code{inv_sympd()} to print a warning if the given matrix is not symmetric \item more consistent detection of vector expressions } } } \section{Changes in RcppArmadillo version 0.9.300.2.0 (2019-03-21)}{ \itemize{ \item Upgraded to Armadillo release 9.300.2 (Fomo Spiral) \itemize{ \item Faster handling of compound complex matrix expressions by \code{trace()} \item More efficient handling of element access for inplace modifications in sparse matrices \item Added \code{.is_sympd()} to check whether a matrix is symmetric/hermitian positive definite \item Added \code{interp2()} for 2D data interpolation \item Added \code{expm1()} and \code{log1p()} \item Expanded 
\code{.is_sorted()} with options "strictascend" and "strictdescend" \item Expanded \code{eig_gen()} to optionally perform balancing prior to decomposition } } } \section{Changes in RcppArmadillo version 0.9.200.7.1 (2019-03-08)}{ \itemize{ \item Explicit setting of \code{RNGversion("3.5.0")} in one unit test to accomodate the change in \code{sample()} in R 3.6.0 \item Back-ported a fix to the Wishart RNG from upstream (Dirk in \ghpr{248} fixing \ghit{247}) } } \section{Changes in RcppArmadillo version 0.9.200.7.0 (2019-01-17)}{ \itemize{ \item Upgraded to Armadillo release 9.200.7 (Carpe Noctem) \item Fixes in 9.200.7 compared to 9.200.5: \itemize{ \item handling complex compound expressions by \code{trace()} \item handling \code{.rows()} and \code{.cols()} by the \code{Cube} class } } } \section{Changes in RcppArmadillo version 0.9.200.5.0 (2018-11-27)}{ \itemize{ \item Upgraded to Armadillo release 9.200.5 (Carpe Noctem) \item Changes in this release \itemize{ \item linking issue when using fixed size matrices and vectors \item faster handling of common cases by \code{princomp()} } } } \section{Changes in RcppArmadillo version 0.9.200.4.0 (2018-11-09)}{ \itemize{ \item Upgraded to Armadillo release 9.200.4 (Carpe Noctem) \itemize{ \item faster handling of symmetric positive definite matrices by \code{rcond()} \item faster transpose of matrices with size ≥ 512x512 \item faster handling of compound sparse matrix expressions by \code{accu()}, \code{diagmat()}, \code{trace()} \item faster handling of sparse matrices by \code{join_rows()} \item expanded \code{sign()} to handle scalar arguments \item expanded operators (\code{*}, \code{\%}, \code{+}, \code{−}) to handle sparse matrices with differing element types (eg. 
multiplication of complex matrix by real matrix) \item expanded \code{conv_to()} to allow conversion between sparse matrices with differing element types \item expanded \code{solve()} to optionally allow keeping solutions of systems singular to working precision \item workaround for \code{gcc} and \code{clang} bug in C++17 mode } \item Commented-out sparse matrix test consistently failing on the fedora-clang machine CRAN, and only there. No fix without access. \item The 'Unit test' vignette is no longer included. } } \section{Changes in RcppArmadillo version 0.9.100.5.0 (2018-08-16)}{ \itemize{ \item Upgraded to Armadillo release 9.100.4 (Armatus Ad Infinitum) \itemize{ \item faster handling of symmetric/hermitian positive definite matrices by \code{solve()} \item faster handling of \code{inv_sympd()} in compound expressions \item added \code{.is_symmetric()} \item added \code{.is_hermitian()} \item expanded \code{spsolve()} to optionally allow keeping solutions of systems singular to working precision \item new configuration options \code{ARMA_OPTIMISE_SOLVE_BAND} and \code{ARMA_OPTIMISE_SOLVE_SYMPD} smarter use of the element cache in sparse matrices \item smarter use of the element cache in sparse matrices } \item Aligned OpenMP flags in the RcppArmadillo.package.skeleton used Makevars{,.win} to not use one C and C++ flag. 
} } \section{Changes in RcppArmadillo version 0.8.600.0.0 (2018-06-28)}{ \itemize{ \item Upgraded to Armadillo release 8.600.0 (Sabretooth Rugrat) \itemize{ \item added \code{hess()} for Hessenberg decomposition \item added \code{.row()}, \code{.rows()}, \code{.col()}, \code{.cols()} to subcube views \item expanded \code{.shed_rows()} and \code{.shed_cols()} to handle cubes \item expanded \code{.insert_rows()} and \code{.insert_cols()} to handle cubes \item expanded subcube views to allow non-contiguous access to slices \item improved tuning of sparse matrix element access operators \item faster handling of tridiagonal matrices by \code{solve()} \item faster multiplication of matrices with differing element types when using OpenMP } } } \section{Changes in RcppArmadillo version 0.8.500.1.1 (2018-05-17) [GH only]}{ \itemize{ \item Upgraded to Armadillo release 8.500.1 (Caffeine Raider) \itemize{ \item bug fix for banded matrices } \item Added \code{slam} to Suggests: as it is used in two unit test functions [CRAN requests] \item The \code{RcppArmadillo.package.skeleton()} function now works with \code{example_code=FALSE} when \CRANpkg{pkgKitten} is present (Santiago Olivella in \ghpr{231} fixing \ghpr{229}) \item The LAPACK tests now cover band matrix solvers (Keith O'Hara in \ghpr{230}). 
} } \section{Changes in RcppArmadillo version 0.8.500.0 (2018-04-21)}{ \itemize{ \item Upgraded to Armadillo release 8.500 (Caffeine Raider) \itemize{ \item faster handling of sparse matrices by \code{kron()} and \code{repmat()} \item faster transpose of sparse matrices \item faster element access in sparse matrices \item faster row iterators for sparse matrices \item faster handling of compound expressions by \code{trace()} \item more efficient handling of aliasing in submatrix views \item expanded \code{normalise()} to handle sparse matrices \item expanded \code{.transform()} and \code{.for_each()} to handle sparse matrices \item added \code{reverse()} for reversing order of elements \item added \code{repelem()} for replicating elements \item added \code{roots()} for finding the roots of a polynomial } \item Fewer LAPACK compile-time guards are used, new unit tests for underlying features have been added (Keith O'Hara in \ghpr{211} addressing \ghit{207}). \item The configure check for LAPACK features has been updated accordingly (Keith O'Hara in \ghpr{214} addressing \ghit{213}). \item The compile-time check for \code{g++} is now more robust to minimal shell versions (\ghpr{217} addressing \ghit{216}). \item Compiler tests were added for macOS (Keith O'Hara in \ghpr{219}). 
} } \section{Changes in RcppArmadillo version 0.8.400.0.0 (2018-02-19)}{ \itemize{ \item Upgraded to Armadillo release 8.400.0 (Entropy Bandit) \itemize{ \item faster handling of sparse matrices by \code{repmat()} \item faster loading of CSV files \item expanded \code{kron()} to handle sparse matrices \item expanded \code{index_min()} and \code{index_max()} to handle cubes \item expanded \code{randi()}, \code{randu()}, \code{randn()}, \code{randg()} to output single scalars \item added submatrix & subcube iterators \item added \code{normcdf()} \item added \code{mvnrnd()} \item added \code{chi2rnd()} \item added \code{wishrnd()} and \code{iwishrnd()} } \item The \code{configure} generated header settings for LAPACK and OpenMP can be overridden by the user. \item This release was preceded by two release candidates which were tested extensively. } } \section{Changes in RcppArmadillo version 0.8.300.1.0 (2017-12-04)}{ \itemize{ \item Upgraded to Armadillo release 8.300.1 (Tropical Shenanigans) \itemize{ \item faster handling of band matrices by \code{solve()} \item faster handling of band matrices by \code{chol()} \item faster \code{randg()} when using OpenMP \item added \code{normpdf()} \item expanded \code{.save()} to allow appending new datasets to existing HDF5 files } \item Includes changes made in several earlier GitHub-only releases (versions 0.8.300.0.0, 0.8.200.2.0 and 0.8.200.1.0). \item Conversion from \code{simple_triplet_matrix} is now supported (Serguei Sokol in \ghpr{192}). \item Updated configure code to check for g++ 5.4 or later to enable OpenMP. 
\item Updated the skeleton package to current packaging standards \item Suppress warnings from Armadillo about missing OpenMP support and \code{-fopenmp} flags by setting \code{ARMA_DONT_PRINT_OPENMP_WARNING} } } \section{Changes in RcppArmadillo version 0.8.100.1.0 (2017-10-10)}{ \itemize{ \item Upgraded to Armadillo release 8.100.1 (Feral Pursuits) \itemize{ \item faster incremental construction of sparse matrices via element access operators \item faster diagonal views in sparse matrices \item expanded \code{SpMat} to save/load sparse matrices in coord format \item expanded \code{.save()},\code{.load()} to allow specification of datasets within HDF5 files \item added \code{affmul()} to simplify application of affine transformations \item warnings and errors are now printed by default to the \code{std::cerr} stream \item added \code{set_cerr_stream()} and \code{get_cerr_stream()} to replace \code{set_stream_err1()}, \code{set_stream_err2()}, \code{get_stream_err1()}, \code{get_stream_err2()} \item new configuration options \code{ARMA_COUT_STREAM} and \code{ARMA_CERR_STREAM} } \item Constructors for sparse matrices of types \code{dgt}, \code{dtt} and \code{dst} now use Armadillo code for improved performance (Serguei Sokol in \ghpr{175} addressing \ghit{173}) \item Sparse matrices call \code{.sync()} before accessing internal arrays (Binxiang Ni in \ghpr{171}) \item The sparse matrix vignette has been converted to Rmarkdown using the pinp package, and is now correctly indexed. (\ghpr{176}) } } \section{Changes in RcppArmadillo version 0.7.960.1.2 (2017-08-29)}{ \itemize{ \item On macOS, OpenMP support is now turned off (\ghpr{170}). \item The package is now compiling under the C++11 standard (\ghpr{170}). 
\item The vignette dependencies are correctly set (James and Dirk in \ghpr{168} and \ghpr{169}) } } \section{Changes in RcppArmadillo version 0.7.960.1.1 (2017-08-20)}{ \itemize{ \item Added improved check for inherited S4 matrix classes (\ghpr{162} fixing \ghit{161}) \item Changed \code{fastLm} C++ function to \code{fastLm_impl} to not clash with R method (\ghpr{164} fixing \ghpr{163}) \item Added OpenMP check for \code{configure} (\ghpr{166} fixing \ghit{165}) } } \section{Changes in RcppArmadillo version 0.7.960.1.0 (2017-08-11)}{ \itemize{ \item Upgraded to Armadillo release 7.960.1 (Northern Banana Republic Deluxe) \itemize{ \item faster \code{randn()} when using OpenMP (NB: usually omitted when used from R) \item faster \code{gmm_diag} class, for Gaussian mixture models with diagonal covariance matrices \item added \code{.sum_log_p()} to the \code{gmm_diag} class \item added \code{gmm_full} class, for Gaussian mixture models with full covariance matrices \item expanded \code{.each_slice()} to optionally use OpenMP for multi-threaded execution } \item Upgraded to Armadillo release 7.950.0 (Northern Banana Republic) \itemize{ \item expanded \code{accu()} and \code{sum()} to use OpenMP for processing expressions with computationally expensive element-wise functions \item expanded \code{trimatu()} and \code{trimatl()} to allow specification of the diagonal which delineates the boundary of the triangular part } \item Enhanced support for sparse matrices (Binxiang Ni as part of Google Summer of Code 2017) \itemize{ \item Add support for \code{dtCMatrix} and \code{dsCMatrix} (\ghpr{135}) \item Add conversion and unit tests for \code{dgT}, \code{dtT} and \code{dsTMatrix} (\ghpr{136}) \item Add conversion and unit tests for \code{dgR}, \code{dtR} and \code{dsRMatrix} (\ghpr{139}) \item Add conversion and unit tests for \code{pMatrix} and \code{ddiMatrix} (\ghpr{140}) \item Rewrite conversion for \code{dgT}, \code{dtT} and \code{dsTMatrix}, and add file-based tests 
(\ghpr{142}) \item Add conversion and unit tests for \code{indMatrix} (\ghpr{144}) \item Rewrite conversion for \code{ddiMatrix} (\ghpr{145}) \item Add a warning message for matrices that cannot be converted (\ghpr{147}) \item Add new vignette for sparse matrix support (\ghpr{152}; Dirk in \ghpr{153}) \item Add support for sparse matrix conversion from Python SciPy (\ghpr{158} addressing \ghit{141}) } \item Optional return of row or column vectors in collapsed form if appropriate \code{#define} is set (Serguei Sokol in \ghpr{151} and \ghpr{154}) \item Correct \code{speye()} for non-symmetric cases (Qiang Kou in \ghpr{150} closing \ghit{149}). \item Ensure tests using Scientific Python and reticulate are properly conditioned on the packages being present. \item Added \code{.aspell/} directory with small local directory now supported by R-devel. } } \section{Changes in RcppArmadillo version 0.7.900.2.0 (2017-06-02)}{ \itemize{ \item Upgraded to Armadillo release 7.900.2 (Evil Banana Republic) \itemize{ \item Expanded \code{clamp()} to handle cubes \item Computationally expensive element-wise functions (such as \code{exp()}, \code{log()}, \code{cos()}, etc) can now be automatically sped up via OpenMP; this requires a C++11/C++14 compiler with OpenMP 3.0+ support for GCC and clang compilers \item One caveat: when using GCC, use of \code{-march=native} in conjunction with \code{-fopenmp} may lead to speed regressions on recent processors } \item Added gcc 7 to support compiler check (James Balamuta in \ghpr{128} addressing \ghit{126}). \item A unit test helper function for \code{rmultinom} was corrected (\ghpr{133}). 
\item OpenMP support was added to the skeleton helper in \code{inline.R} } } \section{Changes in RcppArmadillo version 0.7.800.2.0 (2017-04-12)}{ \itemize{ \item Upgraded to Armadillo release 7.800.2 (Rogue State Redux) \itemize{ \item The Armadillo license changed to Apache License 2.0 } \item The \code{DESCRIPTION} file now mentions the Apache License 2.0, as well as the former MPL2 license used for earlier releases. \item A new file \code{init.c} was added with calls to \code{R_registerRoutines()} and \code{R_useDynamicSymbols()} \item Symbol registration is enabled in \code{useDynLib} \item The \code{fastLm} example was updated } } \section{Changes in RcppArmadillo version 0.7.700.0.0 (2017-02-07)}{ \itemize{ \item Upgraded to Armadillo release 7.700.0 (Rogue State) \itemize{ \item added \code{polyfit()} and \code{polyval()} \item added second form of \code{log_det()} to directly return the result as a complex number \item added \code{range()} to statistics functions \item expanded \code{trimatu()}/\code{trimatl()} and \code{symmatu()}/\code{symmatl()} to handle sparse matrices } } } \section{Changes in RcppArmadillo version 0.7.600.2.0 (2017-01-05)}{ \itemize{ \item Upgraded to Armadillo release 7.600.2 (Coup d'Etat Deluxe) \itemize{ \item Bug fix to memory allocation for \code{fields} } } } \section{Changes in RcppArmadillo version 0.7.600.1.0 (2016-12-16)}{ \itemize{ \item Upgraded to Armadillo release 7.600.1 (Coup d'Etat Deluxe) \itemize{ \item more accurate \code{eigs_sym()} and \code{eigs_gen()} \item expanded \code{floor()}, \code{ceil()}, \code{round()}, \code{trunc()}, \code{sign()} to handle sparse matrices \item added \code{arg()}, \code{atan2()}, \code{hypot()} } } } \section{Changes in RcppArmadillo version 0.7.500.1.0 (2016-11-11)}{ \itemize{ \item Upgraded to Armadillo release 7.500.1 \item Small improvement to return value treatment \item The \code{sample.h} extension was updated to the newer Armadillo interface. 
(Closes \ghit{111}) } } \section{Changes in RcppArmadillo version 0.7.500.0.0 (2016-10-20)}{ \itemize{ \item Upgraded to Armadillo release 7.500.0 (Coup d'Etat) \itemize{ \item Expanded \code{qz()} to optionally specify ordering of the Schur form \item Expanded \code{each_slice()} to support matrix multiplication } } } \section{Changes in RcppArmadillo version 0.7.400.2.0 (2016-08-24)}{ \itemize{ \item Upgraded to Armadillo release 7.400.2 (Feral Winter Deluxe) \itemize{ \item added \code{expmat_sym()}, \code{logmat_sympd()}, \code{sqrtmat_sympd()} \item added \code{.replace()} } } } \section{Changes in RcppArmadillo version 0.7.300.1.0 (2016-07-30)}{ \itemize{ \item Upgraded to Armadillo release 7.300.1 \itemize{ \item added \code{index_min()} and \code{index_max()} standalone functions \item expanded \code{.subvec()} to accept \code{size()} arguments \item more robust handling of non-square matrices by \code{lu()} } } } \section{Changes in RcppArmadillo version 0.7.200.2.0 (2016-07-22)}{ \itemize{ \item Upgraded to Armadillo release 7.200.2 \item The sampling extension was rewritten to use Armadillo vector types instead of Rcpp types (PR \ghpr{101} by James Balamuta) } } \section{Changes in RcppArmadillo version 0.7.200.1.0 (2016-06-06)}{ \itemize{ \item Upgraded to Armadillo release 7.200.1 \itemize{ \item added \code{.index_min()} and \code{.index_max()} \item expanded \code{ind2sub()} to handle vectors of indices \item expanded \code{sub2ind()} to handle matrix of subscripts \item expanded \code{expmat()}, \code{logmat()} and \code{sqrtmat()} to optionally return a bool indicating success \item faster handling of compound expressions by \code{vectorise()} } \item The \code{configure} code now (once again) sets the values for the LAPACK feature \code{#define} correctly. 
} } \section{Changes in RcppArmadillo version 0.7.100.3.0 (2016-05-25)}{ \itemize{ \item Upgraded to Armadillo test release 7.100.3 \itemize{ \item added \code{erf()}, \code{erfc()}, \code{lgamma()} \item added \code{.head_slices()} and \code{.tail_slices()} to subcube views \item \code{spsolve()} now requires SuperLU 5.2 \item \code{eigs_sym()}, \code{eigs_gen()} and \code{svds()} now use a built-in reimplementation of ARPACK for real (non-complex) matrices (code contributed by Yixuan Qiu) } \item The \code{configure} code now checks against old \code{g++} versions which are no longer sufficient to build the package. } } \section{Changes in RcppArmadillo version 0.6.700.6.0 (2016-05-05)}{ \itemize{ \item Upgraded to Armadillo 6.700.6 (Catabolic Amalgamator Deluxe) \itemize{ \item fix for handling empty matrices by \code{kron()} \item fix for clang warning in advanced matrix constructors \item fix for false deprecated warning in \code{trunc_log()} and \code{trunc_exp()} \item fix for gcc-6.1 warning about misleading indentation \item corrected documentation for the \code{solve()} function } \item Added support for \code{int64_t} (\code{ARMA_64BIT_WORD}) when required during compilation time. (PR \ghpr{90} by George G. Vega Yon, fixing \ghpr{88}) \item Fixed bug in \code{SpMat} exporter (PR \ghpr{91} by George G. 
Vega Yon, fixing \ghit{89} and \ghit{72}) } } \section{Changes in RcppArmadillo version 0.6.700.3.0 (2016-04-05)}{ \itemize{ \item Upgraded to Armadillo 6.700.3 (Catabolic Amalgamator Deluxe) \itemize{ \item added \code{logmat()} for calculating the matrix logarithm \item added \code{regspace()} for generating vectors with regularly spaced elements \item added \code{logspace()} for generating vectors with logarithmically spaced elements \item added \code{approx_equal()} for determining approximate equality \item added \code{trapz()} for numerical integration \item expanded \code{.save()} and \code{.load()} with hdf5_binary_trans file type, to save/load data with columns transposed to rows } } } \section{Changes in RcppArmadillo version 0.6.600.4.0 (2016-03-15)}{ \itemize{ \item Upgraded to Armadillo 6.600.4 (Catabolic Amalgamator) \itemize{ \item expanded \code{sum()}, \code{mean()}, \code{min()}, \code{max()} to handle cubes \item expanded \code{Cube} class to handle arbitrarily sized empty cubes (eg. 
0x5x2) \item added \code{shift()} for circular shifts of elements \item added \code{sqrtmat()} for finding the square root of a matrix \item fix for \code{gmm_diag} when using Mahalanobis distance } \item The \code{configure} script now reflects the full LAPACK fallback offered by R 3.3.0 or later (PR \ghpr{81}) } } \section{Changes in RcppArmadillo version 0.6.500.4.0 (2016-01-26)}{ \itemize{ \item Upgraded to Armadillo 6.500.4 (Gourmet Electron Jumper) \itemize{ \item added \code{conv2()} for 2D convolution \item added stand-alone \code{kmeans()} function for clustering data \item added \code{trunc()} \item extended \code{conv()} to optionally provide central convolution \item faster handling of multiply-and-accumulate by \code{accu()} when using Intel MKL, ATLAS or OpenBLAS } \item The \code{configure} script now uses \code{#!/usr/bin/env bash} to cope with systems that do not have \code{#!/bin/bash} (PR \ghpr{75} fixing issue \ghpr{74}) \item RcppArmadillo now defines ARMA_32BIT_WORD to ensure we always use integer vectors that can be passed to R } } \section{Changes in RcppArmadillo version 0.6.400.2.0 (2015-12-15)}{ \itemize{ \item Upgraded to Armadillo 6.400.2 ("Flying Spaghetti Monster Deluxe") \itemize{ \item expanded \code{each_col()}, \code{each_row()} and \code{each_slice()} to handle C++11 lambda functions \item added \code{ind2sub()} and \code{sub2ind()} \item fixes for corner cases in gmm_diag class } } } \section{Changes in RcppArmadillo version 0.6.300.2.2 (2015-12-12)}{ \itemize{ \item Upgraded to Armadillo 6.300.3-test ("Flying Spaghetti Monster") \itemize{ \item Additional test in \code{auxlib_meat.hpp} for limited LAPACK } \item Updated test and \code{#define} for limited LAPACK version R might be built with on Unix-alike systems } } \section{Changes in RcppArmadillo version 0.6.300.2.0 (2015-12-03)}{ \itemize{ \item Upgraded to Armadillo 6.300.2 ("Flying Spaghetti Monster") \itemize{ \item expanded \code{solve()} to find approximate solutions for 
rank-deficient systems \item faster handling of non-contiguous submatrix views in compound expressions \item added \code{.for_each()} to Mat, Row, Col, Cube and field classes \item added \code{rcond()} for estimating the reciprocal condition number \item fixes for \code{spsolve()}, \code{eigs_sym()}, \code{eigs_gen()}, \code{svds()} } \item Added support for \code{Cube} types via \code{as<>} converters (PR \ghpr{64} by Nathan Russell, fixing \ghit{63} and \ghit{42}) } } \section{Changes in RcppArmadillo version 0.6.200.2.0 (2015-10-31)}{ \itemize{ \item Upgraded to Armadillo 6.200.0 ("Midnight Blue Deluxe") \itemize{ \item expanded \code{diagmat()} to handle non-square matrices and arbitrary diagonals \item expanded \code{trace()} to handle non-square matrices } } } \section{Changes in RcppArmadillo version 0.6.100.0.0 (2015-10-03)}{ \itemize{ \item Upgraded to Armadillo 6.100.0 ("Midnight Blue") \itemize{ \item faster \code{norm()} and \code{normalise()} when using ATLAS or OpenBLAS \item added Schur decomposition: \code{schur()} \item stricter handling of matrix objects by \code{hist()} and \code{histc()} \item advanced constructors for using auxiliary memory by Mat, Col, Row and Cube now have the default of \emph{strict = false} \item Cube class now delays allocation of .slice() related structures until needed \item expanded \code{join_slices()} to handle joining cubes with matrices } } } \section{Changes in RcppArmadillo version 0.6.000.1.0 (2015-09-25)}{ \itemize{ \item Upgraded to Armadillo test / bug-fix release 0.6.000.1-test \item Non-CRAN release } } \section{Changes in RcppArmadillo version 0.5.600.2.0 (2015-09-19)}{ \itemize{ \item Upgraded to Armadillo 5.600.2 ("Molotov Cocktail Deluxe") \itemize{ \item expanded \code{.each_col()} and \code{.each_row()} to handle out-of-place operations \item added \code{.each_slice()} for repeated matrix operations on each slice of a cube \item faster handling of compound expressions by \code{join_rows()} and 
\code{join_cols()} } } } \section{Changes in RcppArmadillo version 0.5.500.2.0 (2015-09-03)}{ \itemize{ \item Upgraded to Armadillo 5.500.2 ("Molotov Cocktail") \itemize{ \item expanded object constructors and generators to handle \code{size()} based specification of dimensions \item faster handling of submatrix rows \item faster \code{clamp()} \item fixes for handling sparse matrices } } } \section{Changes in RcppArmadillo version 0.5.400.2.0 (2015-08-17)}{ \itemize{ \item Upgraded to Armadillo 5.400.2 ("Plutocracy Incorporated Deluxe") \itemize{ \item added \code{find_unique()} for finding indices of unique values \item added \code{diff()} for calculating differences between consecutive elements \item added \code{cumprod()} for calculating cumulative product \item added \code{null()} for finding the orthonormal basis of null space \item expanded \code{interp1()} to handle repeated locations \item expanded \code{unique()} to handle complex numbers \item faster \code{flipud()} \item faster row-wise \code{cumsum()} \item fix for k-means clustering in gmm_diag class } \item corrected use of \code{kitten()} thanks to Grant Brown } } \section{Changes in RcppArmadillo version 0.5.300.4 (2015-08-03)}{ \itemize{ \item Upgraded to Armadillo 5.300.4 ("Plutocracy Incorporated") \itemize{ \item added generalised Schur decomposition: \code{qz()} \item added \code{.has_inf()} and \code{.has_nan()} \item expanded \code{interp1()} to handle out-of-domain locations \item expanded sparse matrix class with \code{.set_imag()} and \code{.set_real()} \item expanded \code{imag()}, \code{real()} and \code{conj()} to handle sparse matrices \item expanded \code{diagmat()}, \code{reshape()} and \code{resize()} to handle sparse matrices \item faster sparse \code{sum()} \item faster row-wise \code{sum()}, \code{mean()}, \code{min()}, \code{max()} \item updated physical constants to NIST 2014 CODATA values \item fixes for handling sparse submatrix views \item Armadillo can make use of GPUs by 
linking with NVIDIA NVBLAS (a GPU-accelerated implementation of BLAS), or by linking with AMD ACML (which can use GPUs via OpenCL) } \item Added \code{importFrom} statements for R functions not from base \item Added explicit \code{Rcpp::sourceCpp()} reference as well \item Updated one formatting detail in vignette to please TeXlive2015 } } \section{Changes in RcppArmadillo version 0.5.200.1.0 (2015-06-04)}{ \itemize{ \item Upgraded to Armadillo release 5.200.1 ("Boston Tea Smuggler") \itemize{ \item added \code{orth()} for finding the orthonormal basis of the range space of a matrix \item expanded element initialisation to handle nested initialiser lists (C++11) \item workarounds for bugs in GCC, Intel and MSVC C++ compilers } \item Added another example to \code{inst/examples/fastLm.r} } } \section{Changes in RcppArmadillo version 0.5.100.2.0 (2015-05-12)}{ \itemize{ \item Upgraded to Armadillo test / bug-fix release 5.100.2 \item Non-CRAN release } } \section{Changes in RcppArmadillo version 0.5.100.1.0 (2015-05-01)}{ \itemize{ \item Upgraded to Armadillo release 5.100.1 ("Ankle Biter Deluxe") \itemize{ \item added \code{interp1()} for 1D interpolation \item added \code{.is_sorted()} for checking whether a vector or matrix has sorted elements \item updated physical constants to NIST 2010 CODATA values } } } \section{Changes in RcppArmadillo version 0.5.000.0 (2015-04-12)}{ \itemize{ \item Upgraded to Armadillo release Version 5.000 ("Ankle Biter") \itemize{ \item added \code{spsolve()} for solving sparse systems of linear equations \item added \code{svds()} for singular value decomposition of sparse matrices \item added \code{nonzeros()} for extracting non-zero values from matrices \item added handling of diagonal views by sparse matrices \item expanded \code{repmat()} to handle sparse matrices \item expanded \code{join_rows()} and \code{join_cols()} to handle sparse matrices \item \code{sort_index()} and \code{stable_sort_index()} have been placed in the delayed 
operations framework for increased efficiency \item use of 64 bit integers is automatically enabled when using a C++11 compiler \item workaround for a bug in recent releases of Apple Xcode \item workaround for a bug in LAPACK 3.5 } } } \section{Changes in RcppArmadillo version 0.4.999.1.0 (2015-04-04)}{ \itemize{ \item Upgraded to Armadillo release preview 4.999.1 \item Non-CRAN test release } } \section{Changes in RcppArmadillo version 0.4.650.1.1 (2015-02-25)}{ \itemize{ \item Upgraded to Armadillo release Version 4.650.1 ("Intravenous Caffeine Injector") \itemize{ \item added \code{randg()} for generating random values from gamma distributions (C++11 only) \item added \code{.head_rows()} and \code{.tail_rows()} to submatrix views \item added \code{.head_cols()} and \code{.tail_cols()} to submatrix views \item expanded \code{eigs_sym()} to optionally calculate eigenvalues with smallest/largest algebraic values \item fixes for handling of sparse matrices } \item Applied small correction to main header file to set up C++11 RNG whether or not the alternate RNG (based on R, our default) is used } } \section{Changes in RcppArmadillo version 0.4.600.4.0 (2015-01-23)}{ \itemize{ \item Upgraded to Armadillo release Version 4.600.4 (still "Off The Reservation") \itemize{ \item Speedups in the transpose operation \item Small bug fixes } } } \section{Changes in RcppArmadillo version 0.4.600.0 (2014-12-27)}{ \itemize{ \item Upgraded to Armadillo release Version 4.600 ("Singapore Sling Deluxe") \itemize{ \item added \code{.head()} and \code{.tail()} to submatrix views \item faster matrix transposes within compound expressions \item faster \code{accu()} and \code{norm()} when compiling with -O3 -ffast-math -march=native (gcc and clang) \item workaround for a bug in GCC 4.4 } } } \section{Changes in RcppArmadillo version 0.4.550.2.0 (2014-12-02)}{ \itemize{ \item Upgraded to Armadillo release Version 4.550.2 ("Singapore Sling Deluxe") \itemize{ \item Bug fixes for implicit template 
instantiation for \code{std::pow()} seen with the old g++ 4.4* series } } } \section{Changes in RcppArmadillo version 0.4.550.1.0 (2014-11-26)}{ \itemize{ \item Upgraded to Armadillo release Version 4.550.1 ("Singapore Sling Deluxe") \itemize{ \item added matrix exponential function: \code{expmat()} \item faster \code{.log_p()} and \code{.avg_log_p()} functions in the \code{gmm_diag} class when compiling with OpenMP enabled \item faster handling of in-place addition/subtraction of expressions with an outer product \item applied correction to \code{gmm_diag} relative to the 4.550 release } \item The Armadillo Field type is now converted in \code{as<>} conversions } } \section{Changes in RcppArmadillo version 0.4.500.0 (2014-10-30)}{ \itemize{ \item Upgraded to Armadillo release Version 4.500 ("Singapore Sling") \itemize{ \item faster handling of complex vectors by \code{norm()} \item expanded \code{chol()} to optionally specify output matrix as upper or lower triangular \item better handling of non-finite values when saving matrices as text files } \item The \code{sample} functionality has been extended to provide the Walker Alias method (including new unit tests) via a pull request by Christian Gunning } } \section{Changes in RcppArmadillo version 0.4.450.1.0 (2014-09-21)}{ \itemize{ \item Upgraded to Armadillo release Version 4.450.1 (Spring Hill Fort) \itemize{ \item faster handling of matrix transposes within compound expressions \item expanded \code{symmatu()}/\code{symmatl()} to optionally disable taking the complex conjugate of elements \item expanded \code{sort_index()} to handle complex vectors \item expanded the \code{gmm_diag} class with functions to generate random samples } \item A new random-number implementation for Armadillo uses the RNG from R as a fallback (when C++11 is not selected so the C++11-based RNG is unavailable) which avoids using the older C++98-based \code{std::rand} \item The \code{RcppArmadillo.package.skeleton()} function was updated to 
only set an "Imports:" for Rcpp, but not RcppArmadillo which (as a template library) needs only LinkingTo: \item The \code{RcppArmadillo.package.skeleton()} function will now prefer \code{pkgKitten::kitten()} over \code{package.skeleton()} in order to create a working package which passes \code{R CMD check}. \item The \CRANpkg{pkgKitten} package is now a \code{Suggests:} \item A manual page was added to provide documentation for the functions provided by the skeleton package. \item A small update was made to the package manual page. } } \section{Changes in RcppArmadillo version 0.4.400.0 (2014-08-19)}{ \itemize{ \item Upgraded to Armadillo release Version 4.400 (Winter Shark Alley) \itemize{ \item added \code{gmm_diag} class for statistical modelling using Gaussian Mixture Models; includes multi-threaded implementation of k-means and Expectation-Maximisation for parameter estimation \item added \code{clamp()} for clamping values to be between lower and upper limits \item expanded batch insertion constructors for sparse matrices to add values at repeated locations \item faster handling of subvectors by \code{dot()} \item faster handling of aliasing by submatrix views } \item Corrected a bug (found by the g++ Address Sanitizer) in sparse matrix initialization where space for a sentinel was allocated, but the sentinel was not set; with extra thanks to Ryan Curtin for help \item Added a few unit tests for sparse matrices } } \section{Changes in RcppArmadillo version 0.4.320.0 (2014-07-03)}{ \itemize{ \item Upgraded to Armadillo release Version 4.320 (Daintree Tea Raider) \itemize{ \item expanded \code{eigs_sym()} and \code{eigs_gen()} to use an optional tolerance parameter \item expanded \code{eig_sym()} to automatically fall back to standard decomposition method if divide-and-conquer fails \item automatic installer enables use of C++11 random number generator when using gcc 4.8.3+ in C++11 mode } } } \section{Changes in RcppArmadillo version 0.4.300.8.0 (2014-05-31)}{ 
\itemize{ \item Upgraded to Armadillo release Version 4.300.8 (Medieval Cornea Scraper) \itemize{ \item More robust \code{norm}-related functions \item Fixes for interactions between \code{cube} and \code{vector} types. } \item Adds a \code{#define ARMA_DONT_USE_CXX11} to provide an option to turn C++11 off for Armadillo (but client packages may still use it) \item More robust Windows detection by using \code{_WIN32} as well as \code{WIN32} as the latter gets disabled by MinGW with C++11 \item On Windows, C++11 is turned off as the Armadillo code base uses more features of C++11 than g++ 4.6.2 version in Rtools implements } } \section{Changes in RcppArmadillo version 0.4.300.5.0 (2014-05-19)}{ \itemize{ \item Upgraded to Armadillo release Version 4.300.5 (Medieval Cornea Scraper) \itemize{ \item Handle possible underflows and overflows in \code{norm()}, \code{normalise()}, \code{norm_dot()} \item Fix for handling of null vectors by \code{norm_dot()} } } } \section{Changes in RcppArmadillo version 0.4.300.2.0 (2014-05-13)}{ \itemize{ \item Upgraded to Armadillo release Version 4.300.2 (Medieval Cornea Scraper) \itemize{ \item faster \code{find()} } } } \section{Changes in RcppArmadillo version 0.4.300.0 (2014-05-04)}{ \itemize{ \item Upgraded to Armadillo release Version 4.300 (Medieval Cornea Scraper) \itemize{ \item faster \code{find()} \item added \code{find_finite()} and \code{find_nonfinite()} for finding indices of finite and non-finite elements \item expressions \code{X=inv(A)*B*C} and \code{X=A.i()*B*C} are automatically converted to X=solve(A,B*C) } \item Corrected conversion to \code{unsigned int} vectors and matrices \item Configure script now checks for R version 3.0.3 or newer to enable complex divide-and-conquer SVD in case of R-supplied LAPACK } } \section{Changes in RcppArmadillo version 0.4.200.0 (2014-04-07)}{ \itemize{ \item Upgraded to Armadillo release Version 4.200 (Flintlock Swoop) \itemize{ \item faster transpose of sparse matrices \item 
more efficient handling of aliasing during matrix multiplication \item faster inverse of matrices marked as diagonal } } } \section{Changes in RcppArmadillo version 0.4.100.2 (2014-03-07)}{ \itemize{ \item Upgraded to Armadillo release Version 4.100.2 \itemize{ \item fix for handling null vectors by \code{normalise()} \item fix for memory handling by sparse matrices } \item Correct use of \code{[[ depends()]]} in skeleton example file \item Prepare \code{src/Makevars} for C++11 support from R 3.1.0 by defining \code{USE_CXX11} which is currently commented out \item In the Armadillo configuration, turn on C++11 support if \code{USE_CXX11} is defined } } \section{Changes in RcppArmadillo version 0.4.100.0 (2014-02-28)}{ \itemize{ \item Upgraded to Armadillo release Version 4.100.0 (Dirt Cruiser) \itemize{ \item added \code{normalise()} for normalising vectors to unit p-norm \item extended the \code{field} class to handle 3D layout \item extended \code{eigs_sym()} and \code{eigs_gen()} to obtain eigenvalues of various forms (eg. largest or smallest magnitude) \item automatic SIMD vectorisation of elementary expressions (eg. 
matrix addition) when using Clang 3.4+ with -O3 optimisation \item faster handling of sparse submatrix views \item workaround for a bug in LAPACK 3.4 } } } \section{Changes in RcppArmadillo version 0.4.000.4 (2014-02-19)}{ \itemize{ \item Upgraded to Armadillo release Version 4.000.4 \itemize{ \item fix for \code{randi()} generating out-of-interval values \item fix for saving field objects \item workaround for a bug in the Intel compiler } \item Updated for \CRANpkg{Rcpp} (>= 0.11.0) by removing linking step from build process, added appropriate dependency on \CRANpkg{Rcpp} \item Updated \code{RcppArmadillo.package.skeleton} function accordingly to use proper \code{NAMESPACE} import \item Rewritten \code{rcpparma_hello_world} (which is used by the \code{RcppArmadillo.package.skeleton} function) to use Rcpp Attributes, and added more examples \item Added two functions to set Armadillo's RNGs (ie the system RNG) from a given value and to randomize it, as suggested by Gábor Csárdi. Note that these do not work within RStudio (which itself also uses the same system RNG). } } \section{Changes in RcppArmadillo version 0.4.000.2 (2014-01-21)}{ \itemize{ \item Upgraded to Armadillo release Version 4.000.2 \itemize{ \item fix for \code{randi()} generating out-of-interval values \item workaround for a bug in the Intel compiler } } } \section{Changes in RcppArmadillo version 0.4.000 (2014-01-05)}{ \itemize{ \item Upgraded to Armadillo release Version 4.000 (Feral Steamroller) \itemize{ \item added eigen decompositions of sparse matrices: \code{eigs_sym()} and \code{eigs_gen()} [ but this requires linking against ARPACK which \CRANpkg{RcppArmadillo} as a pure-template package does not do, and R is not linked against ARPACK either. 
] \item added eigen decomposition for pair of matrices: \code{eig_pair()} \item added simpler forms of \code{eig_gen()} \item added condition number of matrices: \code{cond()} \item expanded \code{find()} to handle cubes \item expanded subcube views to access elements specified in a vector \item template argument for \code{running_stat_vec} expanded to accept vector types \item more robust fast inverse of 4x4 matrices \item faster divide-and-conquer decompositions are now used by default for \code{eig_sym()}, \code{pinv()}, \code{princomp()}, \code{rank()}, \code{svd()}, \code{svd_econ()} \item the form \code{inv(sympd(X))} no longer assumes that X is positive definite; use \code{inv_sympd()} instead \item added MEX connector for interfacing Octave/Matlab with Armadillo matrices (contributed by George Yammine) } } } \section{Changes in RcppArmadillo version 0.3.930.1 (2013-12-09)}{ \itemize{ \item Upgraded to Armadillo release Version 3.930.1 \itemize{ \item Armadillo falls back to standard complex svd if the more performant divide-and-conquer variant is unavailable } \item Added detection for Lapack library and distinguish between R's own version (without zgesdd) and system Lapack; a preprocessor define is set accordingly } } \section{Changes in RcppArmadillo version 0.3.930.0 (2013-12-06)}{ \itemize{ \item Upgraded to Armadillo release Version 3.930 ("Dragon's Back") \itemize{ \item added divide-and-conquer variant of \code{svd_econ()}, for faster SVD \item added divide-and-conquer variant of \code{pinv()}, for faster pseudo-inverse \item added element-wise variants of \code{min()} and \code{max()} \item added \code{size()} based specifications of submatrix view sizes \item added \code{randi()} for generating matrices with random integer values \item added more intuitive specification of sort direction in \code{sort()} and \code{sort_index()} \item added more intuitive specification of method in \code{det()}, \code{.i()}, \code{inv()} and \code{solve()} \item 
added more precise timer for the \code{wall_clock} class when using C++11 } \item New unit tests for complex matrices and vectors } } \section{Changes in RcppArmadillo version 0.3.920.3 (2013-11-20)}{ \itemize{ \item Upgraded to Armadillo release Version 3.920.3 \itemize{ \item fix for handling of tiny matrices by \code{.swap()} } } } \section{Changes in RcppArmadillo version 0.3.920.1 (2013-09-27)}{ \itemize{ \item Upgraded to Armadillo release Version 3.920.1 (Agencia Nacional Stasi) \itemize{ \item faster \code{.zeros()} \item faster \code{round()}, \code{exp2()} and \code{log2()} when using C++11 \item added signum function: \code{sign()} \item added move constructors when using C++11 \item added 2D fast Fourier transform: \code{fft2()} \item added \code{.tube()} for easier extraction of vectors and subcubes from cubes \item added specification of a fill type during construction of Mat, Col, Row and Cube classes, eg. \code{mat X(4, 5, fill::zeros)} } \item Initial implementation of \code{wrap<subview>} \item Improved implementation of \code{as<>()} and \code{wrap()} for sparse matrices \item Converted main vignette from \code{LaTeX} style \code{minted} to \code{lstlisting} which permits builds on CRAN; removed set \code{BuildVignettes: FALSE}. } } \section{Changes in RcppArmadillo version 0.3.910.0 (2013-08-12)}{ \itemize{ \item Upgraded to Armadillo release Version 3.910.0 (Pyrenees) \itemize{ \item faster multiplication of a matrix with a transpose of itself, ie. 
\code{X*X.t()} and \code{X.t()*X} \item added \code{vectorise()} for reshaping matrices into vectors \item added \code{all()} and \code{any()} for indicating presence of elements satisfying a relational condition } \item Added conversion support for sparse matrices (of type double) created by the \CRANpkg{Matrix} package as class \code{dgCMatrix} \item Moved vignette sources from \code{inst/doc} to \code{vignettes}; set \code{BuildVignettes: FALSE} as the \code{minted} mode for \code{LaTeX} upsets the CRAN builders. } } \section{Changes in RcppArmadillo version 0.3.900.7 (2013-08-02)}{ \itemize{ \item Upgraded to Armadillo release Version 3.900.7 (Bavarian Sunflower) \itemize{ \item minor fix for inplace \code{reshape()} \item minor corrections for compilation issues under GCC 4.8+ and MSVC } \item Corrected setting of \code{vec_stats} in initialization of row, vector and matrix objects \item The \pkg{inline} package is no longer used in the examples and unit tests which have all been converted to using Rcpp attributes } } \section{Changes in RcppArmadillo version 0.3.900 (2013-06-04)}{ \itemize{ \item Upgraded to Armadillo release Version 3.900.0 (Bavarian Sunflower) \itemize{ \item added automatic SSE2 vectorisation of elementary expressions (eg. 
matrix addition) when using GCC 4.7+ with -O3 optimisation \item added support for saving & loading of cubes in HDF5 format, contributed by Szabolcs Horvat \item faster \code{median()}, contributed by Ruslan Shestopalyuk \item faster handling of compound expressions with transposes of submatrix rows \item faster handling of compound expressions with transposes of complex vectors } \item Kalman filter example switched from inline to \code{sourceCpp}, which simplifies / streamlines the C++ side a little } } \section{Changes in RcppArmadillo version 0.3.820 (2013-05-12)}{ \itemize{ \item Upgraded to Armadillo release Version 3.820 (Mt Cootha) \itemize{ \item faster \code{as_scalar()} for compound expressions \item faster transpose of small vectors \item faster matrix-vector product for small vectors \item faster multiplication of small fixed size matrices } } } \section{Changes in RcppArmadillo version 0.3.810.2 (2013-04-30)}{ \itemize{ \item Upgraded to Armadillo release Version 3.810.2 \itemize{ \item minor fix for initialisation of sparse matrices } } } \section{Changes in RcppArmadillo version 0.3.810.0 (2013-04-19)}{ \itemize{ \item Upgraded to Armadillo release Version 3.810.0 (Newell Highway) \itemize{ \item added fast Fourier transform: \code{fft()} \item added handling of \code{.imbue()} and \code{.transform()} by submatrices and subcubes \item added batch insertion constructors for sparse matrices \item minor fix for multiplication of complex sparse matrices } \item Updated sample() function and test again contributed by Christian Gunning } } \section{Changes in RcppArmadillo version 0.3.800.1 (2013-03-12)}{ \itemize{ \item Upgraded to Armadillo release Version 3.800.1 (Miami Beach) \itemize{ \item workaround for a bug in ATLAS 3.8 on 64 bit systems \item faster matrix-vector multiply for small matrices } \item Added new sample() function and tests contributed by Christian Gunning \item Refactored unit testing code for faster unit test performance } } 
\section{Changes in RcppArmadillo version 0.3.800.0 (2013-03-01)}{ \itemize{ \item Upgraded to Armadillo release Version 3.800.0 (Miami Beach) \itemize{ \item Armadillo is now licensed using the Mozilla Public License 2.0 \item added \code{.imbue()} for filling a matrix/cube with values provided by a functor or lambda expression \item added \code{.swap()} for swapping contents with another matrix \item added \code{.transform()} for transforming a matrix/cube using a functor or lambda expression \item added \code{round()} for rounding matrix elements towards nearest integer \item faster \code{find()} \item fixes for handling non-square matrices by \code{qr()} and \code{qr_econ()} \item minor fixes for handling empty matrices \item reduction of pedantic compiler warnings } \item Updated vignette to paper now in press at CSDA \item Added CITATION file with reference to CSDA paper } } \section{Changes in RcppArmadillo version 0.3.6.3 (2013-02-20)}{ \itemize{ \item Upgraded to Armadillo release Version 3.6.3 \itemize{ \item faster \code{find()} \item minor fix for non-contiguous submatrix views to handle empty vectors of indices \item reduction of pedantic compiler warnings } } } \section{Changes in RcppArmadillo version 0.3.6.2 (2013-01-29)}{ \itemize{ \item Upgraded to Armadillo release Version 3.6.2 \itemize{ \item faster determinant for matrices marked as diagonal or triangular \item more fine-grained handling of 64 bit integers } \item Added a new example of a Kalman filter implementation in R, and C++ using Armadillo via RcppArmadillo, complete with timing comparison } } \section{Changes in RcppArmadillo version 0.3.6.1 (2012-12-17)}{ \itemize{ \item Upgraded to Armadillo release Version 3.6.1 (Piazza del Duomo) \itemize{ \item faster \code{trace()} \item fix for handling sparse matrices by \code{dot()} \item fixes for interactions between sparse and dense matrices } \item Now throws compiler error if \code{Rcpp.h} is included before \code{RcppArmadillo.h} (as the 
former is included automatically by the latter anyway, but template logic prefers this ordering). } } \section{Changes in RcppArmadillo version 0.3.4.3 (2012-10-04)}{ \itemize{ \item Upgraded to Armadillo release 3.4.3 \itemize{ \item fix for aliasing issue in \code{diagmat()} \item fix for \code{speye()} signature } } } \section{Changes in RcppArmadillo version 0.3.4.2 (2012-09-25)}{ \itemize{ \item Upgraded to Armadillo release 3.4.2 \itemize{ \item minor fixes for handling sparse submatrix views \item minor speedups for sparse matrices } } } \section{Changes in RcppArmadillo version 0.3.4.1 (2012-09-18)}{ \itemize{ \item Upgraded to Armadillo release 3.4.1 \itemize{ \item workaround for a bug in the Mac OS X accelerate framework \item fixes for handling empty sparse matrices \item added documentation for saving & loading matrices in HDF5 format \item faster dot() and cdot() for complex numbers } } } \section{Changes in RcppArmadillo version 0.3.4.0 (2012-09-06)}{ \itemize{ \item Upgraded to Armadillo release 3.4.0 (Ku De Ta) \itemize{ \item added economical QR decomposition: qr_econ() \item added .each_col() & .each_row() for vector operations repeated on each column or row \item added preliminary support for sparse matrices, contributed by Ryan Curtin et al. 
(Georgia Institute of Technology) \item faster singular value decomposition via divide-and-conquer algorithm \item faster .randn() } \item NEWS file converted to Rd format } } \section{Changes in RcppArmadillo version 0.3.3.91 (2012-08-30)}{ \itemize{ \item Upgraded to Armadillo release 3.3.91 \itemize{ \item faster singular value decomposition via "divide and conquer" algorithm \item added economical QR decomposition: qr_econ() \item added .each_col() & .each_row() for vector operations repeated on each column or row \item added preliminary support for sparse matrices, contributed by Ryan Curtin, James Cline and Matthew Amidon (Georgia Institute of Technology) } \item Corrected summary method to deal with the no intercept case when using a formula; also display residual summary() statistics \item Expanded unit tests for fastLm } } \section{Changes in RcppArmadillo version 0.3.2.4 (2012-07-11)}{ \itemize{ \item Upgraded to Armadillo release 3.2.4 \itemize{ \item workaround for a regression (bug) in GCC 4.7.0 and 4.7.1 } } } \section{Changes in RcppArmadillo version 0.3.2.3 (2012-07-01)}{ \itemize{ \item Upgraded to Armadillo release 3.2.3 \itemize{ \item minor correction for declaration of fixed size vectors and matrices \item Reverted three header files \{Mat,Row,Col\}_bones.hpp back to previous release due to compilation failures under g++-4.7 \item Added new vignette 'RcppArmadillo-intro' based on a just-submitted introductory paper (by Eddelbuettel and Sanderson) about RcppArmadillo \item Change from release 3.2.2 which we skipped as it did not really affect builds under R: \itemize{ \item minor fix for compiling without debugging enabled (aka release mode) \item better detection of ATLAS during installation on Fedora and Red Hat systems } \item Small enhancement to fastLm } } } \section{Changes in RcppArmadillo version 0.3.2.0 (2012-05-21)}{ \itemize{ \item Upgraded to Armadillo release 3.2.0 "Creamfields" \itemize{ \item faster eigen decomposition via "divide 
and conquer" algorithm \item faster transpose of vectors and compound expressions \item faster handling of diagonal views \item faster handling of tiny fixed size vectors (≤ 4 elements) \item added unique(), for finding unique elements of a matrix } } } \section{Changes in RcppArmadillo version 0.3.1.94 (2012-05-15)}{ \itemize{ \item Upgraded to Armadillo release 3.1.94 "v3.2 beta 2" \itemize{ \item added unique(), for finding unique elements of a matrix \item faster eigen decomposition via "divide and conquer" algorithm \item faster transpose of vectors and compound expressions \item faster handling of tiny fixed size vectors (≤ 4 elements) } } } \section{Changes in RcppArmadillo version 0.3.1.92 (2012-05-10)}{ \itemize{ \item Upgraded to Armadillo release 3.1.92 "v3.2 beta 2" \itemize{ \item added unique(), for finding unique elements of a matrix \item faster eigen decomposition via optional use of "divide and conquer" by eig_sym() \item faster transpose of vectors and compound expressions } } } \section{Changes in RcppArmadillo version 0.3.0.3 (2012-05-03)}{ \itemize{ \item Upgraded to Armadillo release 3.0.3 \itemize{ \item fixes for inplace transpose of complex number matrices \item fixes for complex number version of svd_econ() \item fixes for potential aliasing issues with submatrix views } \item New example script fastLm } } \section{Changes in RcppArmadillo version 0.3.0.2 (2012-04-19)}{ \itemize{ \item Upgraded to Armadillo release 3.0.2 \itemize{ \item fixes for handling diagonal matrices } \item Undefine NDEBUG if it has been set (as R does) as this prevents a number of useful debugging checks. 
Users can still define it or define ARMA_NO_DEBUG if they want a 'non-development' build } } \section{Changes in RcppArmadillo version 0.3.0.1 (2012-04-12)}{ \itemize{ \item Upgraded to Armadillo release 3.0.1 \itemize{ \item fixes for compilation errors \item fixes for potential aliasing issues } } } \section{Changes in RcppArmadillo version 0.3.0 (2012-04-10)}{ \itemize{ \item Upgraded to Armadillo release 3.0.0 "Antarctic Chilli Ranch" \itemize{ \item added non-contiguous submatrix views \item added shorthand for inverse: .i() \item added hist() and histc() \item faster repmat() \item faster handling of submatrix views with one row or column \item faster generation of random numbers \item faster element access in fixed size matrices \item better detection of vector expressions by sum(), cumsum(), prod(), min(), max(), mean(), median(), stddev(), var() \item expressions X=A.i()*B and X=inv(A)*B are automatically converted to X=solve(A,B) } } } \section{Changes in RcppArmadillo version 0.2.40 (2012-04-04)}{ \itemize{ \item Upgraded to Armadillo release 2.99.4 "Antarctic Chilli Ranch (Beta 4)" \itemize{ \item fixes for handling expressions with fixed size matrices } } } \section{Changes in RcppArmadillo version 0.2.39 (2012-04-02)}{ \itemize{ \item Upgraded to Armadillo release 2.99.3 "Antarctic Chilli Ranch (Beta 3)" \itemize{ \item faster repmat() \item workarounds for braindead compilers (eg. 
Visual Studio) } } } \section{Changes in RcppArmadillo version 0.2.38 (2012-03-28)}{ \itemize{ \item Upgraded to Armadillo release 2.99.2 "Antarctic Chilli Ranch (Beta 2)" \itemize{ \item added .i() \item much faster handling of .col() and .row() \item expressions X=A.i()*B and X=inv(A)*B are automatically converted to X=solve(A,B) } } } \section{Changes in RcppArmadillo version 0.2.37 (2012-03-19)}{ \itemize{ \item Upgraded to Armadillo release 2.99.1 "Antarctic Chilli Ranch (Beta 1)" \itemize{ \item added non-contiguous submatrix views \item added hist() and histc() \item faster handling of submatrix views \item faster generation of random numbers \item faster element access in fixed size matrices \item better detection of vector expressions by sum(), cumsum(), prod(), min(), max(), mean(), median(), stddev(), var() } } } \section{Changes in RcppArmadillo version 0.2.36 (2012-03-05)}{ \itemize{ \item Upgraded to Armadillo release 2.4.4 \itemize{ \item fixes for qr() and syl() \item more portable wall_clock class \item faster relational operators on submatrices } } } \section{Changes in RcppArmadillo version 0.2.35 (2012-02-17)}{ \itemize{ \item Upgraded to Armadillo release 2.4.3 \itemize{ \item Support for ARMA_DEFAULT_OSTREAM using Rcpp::Rcout added \item Minor bug fix release improving corner cases affecting builds: \itemize{ \item Missing semicolon added in Mat_meat (when in C++0x mode), with thanks to Teo Guo Ci \item Armadillo version vars now instantiated in RcppArmadillo.cpp which helps older g++ versions, with thanks to Gershon Bialer \item Thanks also to Martin Renner for testing these changes \item Unit tests output fallback directory changed per Brian Ripley's request to not ever use /tmp \item Minor update to version numbers in RcppArmadillo-package.Rd } } } } \section{Changes in RcppArmadillo version 0.2.34 (2011-12-12)}{ \itemize{ \item Upgraded to Armadillo release 2.4.2 \itemize{ \item clarified documentation for .reshape() \item fix for handling 
of empty matrices by .resize() } } } \section{Changes in RcppArmadillo version 0.2.33 (2011-12-07)}{ \itemize{ \item Upgraded to Armadillo release 2.4.1 \itemize{ \item added .resize() \item fix for vector initialisation } } } \section{Changes in RcppArmadillo version 0.2.32 (2011-12-04)}{ \itemize{ \item Upgraded to Armadillo test release 2.4.0 "Loco Lounge Lizard" \item Minimal changes relative to 0.2.31 based on 2.3.92, next section is relative to the previous stable release series 2.2.* of Armadillo \itemize{ \item added shorter forms of transposes: .t() and .st() \item added optional use of 64 bit indices, allowing matrices to have more than 4 billion elements \item added experimental support for C++11 initialiser lists \item faster pinv() \item faster inplace transpose \item faster handling of expressions with diagonal views \item fixes for handling expressions with aliasing and submatrices \item fixes for linking on Ubuntu and Debian systems \item fixes for inconsistencies in interactions between matrices and cubes \item refactored code to eliminate warnings when using the Clang C++ compiler \item .print_trans() and .raw_print_trans() are deprecated } } } \section{Changes in RcppArmadillo version 0.2.31 (2011-11-28)}{ \itemize{ \item Upgraded to Armadillo test release 2.3.92 "Loco Lounge Lizard (Beta 2)" \itemize{ \item fixes for linking on Ubuntu and Debian systems \item fixes for inconsistencies in interactions between matrices and cubes } } } \section{Changes in RcppArmadillo version 0.2.30 (2011-11-19)}{ \itemize{ \item Upgraded to Armadillo test release 2.3.91 "Loco Lounge Lizard (Beta 1)" \itemize{ \item added shorter forms of transposes: .t() and .st() \item added optional use of 64 bit indices, allowing matrices to have more than 4 billion elements \item added experimental support for C++11 initialiser lists \item faster pinv() \item faster inplace transpose \item bugfixes for handling expressions with aliasing and submatrices \item refactored code 
to eliminate warnings when using the Clang C++ compiler \item .print_trans() and .raw_print_trans() are deprecated } } } \section{Changes in RcppArmadillo version 0.2.29 (2011-09-01)}{ \itemize{ \item Upgraded to Armadillo release 2.2.3 \itemize{ \item Release fixes a speed issue in the as_scalar() function. } } } \section{Changes in RcppArmadillo version 0.2.28 (2011-08-02)}{ \itemize{ \item Upgraded to Armadillo release 2.2.1 "Blue Skies Debauchery" \itemize{ \item faster multiplication of small matrices \item faster trans() \item faster handling of submatrices by norm() \item added economical singular value decomposition: svd_thin() \item added circ_toeplitz() \item added .is_colvec() & .is_rowvec() \item fixes for handling of complex numbers by cov(), cor(), running_stat_vec } } } \section{Changes in RcppArmadillo version 0.2.27 (2011-07-22)}{ \itemize{ \item Upgraded to Armadillo release 2.1.91 "v2.2 beta 1" \itemize{ \item faster multiplication of small matrices \item faster trans() \item faster handling of submatrices by norm() \item added economical singular value decomposition: svd_thin() \item added circ_toeplitz() \item added .is_colvec() & .is_rowvec() } } } \section{Changes in RcppArmadillo version 0.2.26 (2011-07-17)}{ \itemize{ \item Upgraded to Armadillo release 2.0.2 \itemize{ \item fix for handling of conjugate transpose by as_scalar() \item fix for handling of aliasing by diagmat() \item fix for handling of empty matrices by symmatu()/symmatl() } } } \section{Changes in RcppArmadillo version 0.2.25 (2011-06-30)}{ \itemize{ \item Upgraded to Armadillo 2.0.1 which fixes two minor compilation issues } } \section{Changes in RcppArmadillo version 0.2.24 (2011-06-29)}{ \itemize{ \item Upgraded to Armadillo release 2.0.0 "Carnivorous Sugar Glider" \itemize{ \item faster multiplication of tiny matrices (≤ 4x4) \item faster compound expressions containing submatrices \item faster inverse of symmetric positive definite matrices \item faster element access 
for fixed size matrices \item added handling of arbitrarily sized empty matrices (eg. 5x0) \item added loading & saving of matrices as CSV text files \item added .count() member function to running_stat and running_stat_vec \item added syl(), strans(), symmatu()/symmatl() \item added submatrices of submatrices \item det(), inv() and solve() can be forced to use more precise algorithms for tiny matrices (≤ 4x4) \item htrans() has been deprecated; use trans() instead \item API change: trans() now takes the complex conjugate when transposing a complex matrix \item API change: .is_vec() now outputs true for empty vectors (eg. 0x1) \item API change: forms of chol(), eig_sym(), eig_gen(), inv(), lu(), pinv(), princomp(), qr(), solve(), svd(), syl() that do not return a bool indicating success now throw std::runtime_error exceptions when failures are detected \item API change: princomp_cov() has been removed; princomp() in conjunction with cov() can be used instead \item API change: set_log_stream() & get_log_stream() have been replaced by set_stream_err1() & get_stream_err1() } } } \section{Changes in RcppArmadillo version 0.2.23 (2011-06-23)}{ \itemize{ \item Upgraded to Armadillo release 1.99.5 "v2.0 beta 5" \itemize{ \item Forms of chol(), eig_sym(), eig_gen(), inv(), lu(), pinv(), princomp(), qr(), solve(), svd(), syl() that do not return a bool indicating success now throw std::runtime_error exceptions when failures are detected \item princomp_cov() has been removed; princomp() in conjunction with cov() can be used instead \item set_log_stream() & get_log_stream() have been replaced by set_stream_err1() & get_stream_err1() \item det(), inv() and solve() can be forced to use more precise algorithms for tiny matrices (≤ 4x4) \item Added loading & saving of matrices as CSV text files } \item fastLmPure() now uses same argument order as R's lm.fit() \item Export and document S3 methods in NAMESPACE and manual page as such } } \section{Changes in RcppArmadillo 
version 0.2.22 (2011-06-06)}{ \itemize{ \item Upgraded to Armadillo release 1.99.4 "v2.0 beta 4" \itemize{ \item fixes for handling of tiny matrices } } } \section{Changes in RcppArmadillo version 0.2.21 (2011-05-27)}{ \itemize{ \item Upgraded to Armadillo release 1.99.3 "v2.0 beta 3" \itemize{ \item stricter size checking for row and column vectors \item added .count() member function to running_stat and running_stat_vec } } } \section{Changes in RcppArmadillo version 0.2.20 (2011-05-25)}{ \itemize{ \item Upgraded to Armadillo release 1.99.2 "v2.0 beta 2" (and 1.99.1 before) \itemize{ \item faster inverse of symmetric matrices \item faster element access for fixed size matrices \item faster multiplication of tiny matrices (eg. 4x4) \item faster compound expressions containing submatrices \item added handling of arbitrarily sized empty matrices (eg. 5x0) \item added syl() \item added strans() \item added symmatu()/symmatl() \item added submatrices of submatrices \item htrans() has been deprecated; use trans() instead \item trans() now takes the complex conjugate when transposing a complex matrix \item .is_vec() now outputs true for empty matrices \item most functions with matrix inputs no longer throw exceptions when given empty matrices (eg. 
5x0) } \item Added a new subdirectory examples/ seeded with a nice Vector Autoregression simulation example by Lance Bachmeier \item Rewrote armadillo_version as to no longer require an instance of arma::arma_version, with thanks to Conrad for the suggestion } } \section{Changes in RcppArmadillo version 0.2.19 (2011-04-18)}{ \itemize{ \item Upgraded to Armadillo version 1.2.0 "Unscrupulous Carbon Emitter" \itemize{ \item Added ability to use Blas & Lapack libraries with capitalised function names \item Reduction of pedantic compiler warnings } } } \section{Changes in RcppArmadillo version 0.2.18 (2011-04-03)}{ \itemize{ \item Upgraded to Armadillo version 1.1.92 "Jurassic Barbecue" \itemize{ \item Bugfix in cor() \item Automatic installation now requires CMake >= 2.6 } } } \section{Changes in RcppArmadillo version 0.2.17 (2011-03-22)}{ \itemize{ \item Upgraded to Armadillo version 1.1.90 "Inside Job" \itemize{ \item Added .min() & .max(), which can provide the extremum's location \item More robust mean(), var(), stddev() } } } \section{Changes in RcppArmadillo version 0.2.16 (2011-03-10)}{ \itemize{ \item Upgraded to Armadillo version 1.1.8 "Kangaroo Steak" \itemize{ \item Added floor() and ceil() \item Added “not a number”: math::nan() \item Added infinity: math::inf() \item Added standalone is_finite() \item Faster min(), max(), mean() \item Bugfix for a corner case with NaNs in min() and max() } } } \section{Changes in RcppArmadillo version 0.2.15 (2011-03-04)}{ \itemize{ \item Upgraded to Armadillo version 1.1.6 “Baby Carpet Shark” \itemize{ \item fixed size matrices and vectors can use auxiliary (external) memory \item .in_range() can use span() arguments \item subfields can use span() arguments } } } \section{Changes in RcppArmadillo version 0.2.14 (2011-03-02)}{ \itemize{ \item Support Run-Time Type Information (RTTI) on matrices by setting the state variable vec_state in Row and Col instantiation, with thanks to Conrad Sanderson for the hint \item 
fastLm code simplified further by instantiating the Armadillo matrix and vector directly from the SEXP coming from R \item inst/doc/Makefile now respects $R_HOME environment variable } } \section{Changes in RcppArmadillo version 0.2.13 (2011-02-18)}{ \itemize{ \item Upgraded to Armadillo version 1.1.4 “Manta Lodge” \itemize{ \item Faster sort() \item Updated installation to detect recent versions of Intel's MKL \item Added interpretation of arbitrary "flat" subcubes as matrices } } } \section{Changes in RcppArmadillo version 0.2.12 (2011-02-15)}{ \itemize{ \item Upgraded to Armadillo version 1.1.2 “Flood Kayak” \itemize{ \item Faster prod() \item Faster solve() for compound expressions \item Fix for compilation using GCC's C++0x mode \item Fix for matrix handling by subcubes } } } \section{Changes in RcppArmadillo version 0.2.11 (2011-01-06)}{ \itemize{ \item Upgraded to Armadillo version 1.1.0 “Climate Vandal” \itemize{ \item Extended submatrix views, including access to elements whose indices are specified in a separate vector \item Added handling of raw binary files by save/load functions \item Added cumsum() \item Added interpretation of matrices as triangular via trimatu()/trimatl() \item Faster solve(), inv() via explicit handling of triangular matrices \item The stream for logging of errors and warnings can now be changed } \item New unexported R function SHLIB, a small wrapper around R CMD SHLIB, which can be used as Rscript -e "RcppArmadillo:::SHLIB('foo.cpp')" } } \section{Changes in RcppArmadillo version 0.2.10 (2010-11-25)}{ \itemize{ \item Upgraded to Armadillo 1.0.0 "Antipodean Antileech" \itemize{ \item After 2 1/2 years of collaborative development, we are proud to release the 1.0 milestone version. \item Many thanks are extended to all contributors and bug reporters. 
} \item R/RcppArmadillo.package.skeleton.R: Updated to no longer rely on GNU make for builds of packages using RcppArmadillo \item summary() for fastLm() objects now returns r.squared and adj.r.squared } } \section{Changes in RcppArmadillo version 0.2.9 (2010-11-11)}{ \itemize{ \item Upgraded to Armadillo 0.9.92 "Wall Street Gangster": \itemize{ \item Fixes for compilation issues under the Intel C++ compiler \item Added matrix norms } } } \section{Changes in RcppArmadillo version 0.2.8 (2010-10-16)}{ \itemize{ \item Upgraded to Armadillo 0.9.90 "Water Dragon": \itemize{ \item Added unsafe_col() \item Speedups and bugfixes in lu() \item Minimisation of pedantic compiler warnings } \item Switched NEWS and ChangeLog between inst/ and the top-level directory so that NEWS (this file) gets installed with the package } } \section{Changes in RcppArmadillo version 0.2.7 (2010-09-25)}{ \itemize{ \item Upgraded to Armadillo 0.9.80 "Chihuahua Muncher": \itemize{ \item Added join_slices(), insert_slices(), shed_slices() \item Added in-place operations on diagonals \item Various speedups due to internal architecture improvements } } } \section{Changes in RcppArmadillo version 0.2.6 (2010-09-12)}{ \itemize{ \item Upgraded to Armadillo 0.9.70 "Subtropical Winter Safari" \item arma::Mat, arma::Row and arma::Col get constructor that take vector or matrix sugar expressions. See the unit test "test.armadillo.sugar.ctor" and "test.armadillo.sugar.matrix.ctor" for examples. 
} } \section{Changes in RcppArmadillo version 0.2.5 (2010-08-05)}{ \itemize{ \item Upgraded to Armadillo 0.9.60 "Killer Bush Turkey" } } \section{Changes in RcppArmadillo version 0.2.4 (2010-07-27)}{ \itemize{ \item Upgraded to Armadillo 0.9.52 'Monkey Wrench' \item src/fastLm.cpp: Switch from inv() to pinv() as inv() now tests for singular matrices and warns and returns an empty matrix which stops the example fastLm() implementation on the manual page -- and while this is generally reasonable, it makes sense here to continue, which the Moore-Penrose pseudo-inverse allows us to do } } \section{Changes in RcppArmadillo version 0.2.3 (2010-06-14)}{ \itemize{ \item Better configuration to detect suncc (which does not have std::isfinite) } } \section{Changes in RcppArmadillo version 0.2.2 (2010-06-09)}{ \itemize{ \item Added RcppArmadillo:::CxxFlags for cases where RcppArmadillo is not used via a package \item Upgraded to Armadillo 0.9.10 'Chilli Espresso' \item Wrap support for mtOp, i.e. operations involving mixed types such as a complex and an arma::mat, which have been introduced in armadillo 0.9.10 \item Wrap support for mtGlue, i.e. operations involving matrices of mixed types such as an arma::mat and an arma::imat, which have been introduced in armadillo 0.9.10 \item Included an inline plugin to support the plugin system introduced in inline 0.3.5. The unit tests have moved from the src directory to the unit test directory (similar to Rcpp) using cxxfunction with the RcppArmadillo plugin. 
} } \section{Changes in RcppArmadillo version 0.2.1 (2010-05-19)}{ \itemize{ \item Bug-fix release permitting compilation on Windows } } \section{Changes in RcppArmadillo version 0.2.0 (2010-05-18)}{ \itemize{ \item fastLm() is now generic and has a formula interface as well as methods for print, summary, predict to behave like a standard model fitting function \item Armadillo sources (using release 0.9.8) are now included in the package using a standardized build suitable for our purposes (not assuming Boost or Atlas) -- see ?RcppArmadillo for details \item New R function RcppArmadillo.package.skeleton, similar to Rcpp::Rcpp.package.skeleton, but targeting use of RcppArmadillo } } \section{Changes in RcppArmadillo version 0.1.0 (2010-03-11)}{ \itemize{ \item the fastLm() implementation of a bare-bones lm() fit (using Armadillo's solve() function) provides an example of how efficient code can be written compactly using the combination of Rcpp, RcppArmadillo and Armadillo \item support for Rcpp implicit wrap of these types : Mat<T>, Col<T>, Row<T>, Cube<T> where T is one of : int, unsigned int, double, float \item support for Rcpp implicit as of these types : Mat<T>, Col<T>, Row<T> where T is one of : int, unsigned int, double, float } }
/packrat/lib/x86_64-apple-darwin19.4.0/4.0.4/RcppArmadillo/NEWS.Rd
no_license
marilotte/Pregancy_Relapse_Count_Simulation
R
false
false
89,884
rd
\name{NEWS} \title{News for Package \pkg{RcppArmadillo}} \newcommand{\ghpr}{\href{https://github.com/RcppCore/RcppArmadillo/pull/#1}{##1}} \newcommand{\ghit}{\href{https://github.com/RcppCore/RcppArmadillo/issues/#1}{##1}} \section{Changes in RcppArmadillo version 0.10.2.2.0 (2021-03-09)}{ \itemize{ \item Upgraded to Armadillo release 10.2.2 (Cicada Swarm) \itemize{ \item faster handling of subcubes \item added \code{tgamma()} \item added \code{.brief_print()} for abridged printing of matrices & cubes \item expanded forms of \code{trimatu()} and \code{trimatl()} with diagonal specification to handle sparse matrices \item expanded \code{eigs_sym()} and \code{eigs_gen()} with optional shift-invert mode } \item Removed \code{debian/} directory from repository as packaging is on salsa.debian.org. \item Relaxed tolerance on two \code{cube} tests on Windows to accomodate new 'gcc10-UCRT' builder. } } \section{Changes in RcppArmadillo version 0.10.2.1.0 (2021-02-09)}{ \itemize{ \item Upgraded to Armadillo release 10.2.1 (Cicada Swarm) \itemize{ \item faster handling of subcubes \item added \code{tgamma()} \item added \code{.brief_print()} for abridged printing of matrices & cubes \item expanded forms of \code{trimatu()} and \code{trimatl()} with diagonal specification to handle sparse matrices \item expanded \code{eigs_sym()} and \code{eigs_gen()} with optional shift-invert mode } } } \section{Changes in RcppArmadillo version 0.10.1.2.2 (2021-01-08)}{ \itemize{ \item Correct one unit test for \pkg{Matrix} 1.3.0-caused changed (Binxiang in \ghpr{319} and Dirk in \ghpr{322}). 
\item Suppress one further warning from \pkg{Matrix} (Dirk) \item Apply an upstream \code{NaN} correction (Conrad in \ghpr{321}) \item Added GitHub Actions CI using \code{run.sh} from r-ci (Dirk) } } \section{Changes in RcppArmadillo version 0.10.1.2.0 (2020-11-15)}{ \itemize{ \item Upgraded to Armadillo release 10.1.2 (Orchid Ambush) \item Remove three unused int constants (\ghit{313}) \item Include main armadillo header using quotes instead of brackets \item Rewrite version number use in old-school mode because gcc 4.8.5 \item Skipping parts of sparse conversion on Windows as win-builder fails } } \section{Changes in RcppArmadillo version 0.10.1.0.0 (2020-10-09)}{ \itemize{ \item Upgraded to Armadillo release 10.1.0 (Orchid Ambush) \itemize{ \item C++11 is now the minimum required C++ standard \item faster handling of compound expressions by \code{trimatu()} and \code{trimatl()} \item faster sparse matrix addition, subtraction and element-wise multiplication \item expanded sparse submatrix views to handle the non-contiguous form of \code{X.cols(vector_of_column_indices)} \item expanded \code{eigs_sym()} and \code{eigs_gen()} with optional fine-grained parameters (subspace dimension, number of iterations, eigenvalues closest to specified value) \item deprecated form of \code{reshape()} removed from Cube and SpMat classes \item ignore and warn on use of the \code{ARMA_DONT_USE_CXX11} macro } \item Switch Travis CI testing to focal and BSPM } } \section{Changes in RcppArmadillo version 0.9.900.3.0 (2020-09-02)}{ \itemize{ \item Upgraded to Armadillo release 9.900.3 (Nocturnal Misbehaviour) \itemize{ \item More efficient code for initialising matrices with \code{fill::zeros} \item Fixes for various error messages } } } \section{Changes in RcppArmadillo version 0.9.900.2.0 (2020-07-17)}{ \itemize{ \item Upgraded to Armadillo release 9.900.2 (Nocturnal Misbehaviour) \itemize{ \item In \code{sort()}, fixes for inconsistencies between checks applied to matrix and vector 
expressions \item In \code{sort()}, remove unnecessary copying when applied in-place to vectors } } } \section{Changes in RcppArmadillo version 0.9.900.1.0 (2020-06-08)}{ \itemize{ \item Upgraded to Armadillo release 9.900.1 (Nocturnal Misbehaviour) \itemize{ \item faster \code{solve()} for under/over-determined systems \item faster \code{eig_gen()} and \code{eig_pair()} for large matrices \item expanded \code{eig_gen()} and \code{eig_pair()} to optionally provide left and right eigenvectors } \item Switch Travis CI testing to R 4.0.0, use bionic as base distro and test R 3.6.3 and 4.0.0 in a matrix (Dirk in \ghpr{298}). \item Add two badges to README for indirect use and the CSDA paper. \item Adapt \code{RcppArmadillo.package.skeleton()} to a change in R 4.0.0 affecting what it exports in \code{NAMESPACE}. } } \section{Changes in RcppArmadillo version 0.9.880.1.0 (2020-05-15)}{ \itemize{ \item Upgraded to Armadillo release 9.880.1 (Roasted Mocha Detox) \itemize{ \item expanded \code{qr()} to optionally use pivoted decomposition \item updated physical constants to NIST 2018 CODATA values \item added \code{ARMA_DONT_USE_CXX11_MUTEX} configuration option to disable use of \code{std::mutex} } \item OpenMP capability is tested explicitly (Kevin Ushey and Dirk in \ghpr{294}, \ghpr{295}, and \ghpr{296} all fixing \ghit{290}). 
} } \section{Changes in RcppArmadillo version 0.9.870.2.0 (2020-04-24)}{ \itemize{ \item Upgraded to Armadillo release 9.870.2 (Roasted Mocha Retox) \itemize{ \item faster handling of matrix multiplication expressions by \code{diagvec()} and \code{diagmat()} \item added \code{trimatu_ind()} and \code{trimatl_ind()} \item more consistent detection of sparse vector expressions } } } \section{Changes in RcppArmadillo version 0.9.860.2.0 (2020-04-13)}{ \itemize{ \item Upgraded to Armadillo release 9.860.2 (Roasted Mocha Fix) \itemize{ \item added \code{powmat()} \item faster access to columns in sparse submatrix views \item faster handling of relational expressions by \code{accu()} \item faster handling of sympd matrices by \code{expmat()}, \code{logmat()}, \code{sqrtmat()} \item workaround for save/load issues with HDF5 v1.12 } \item Vignettes are now pre-made and include (\ghpr{285}) \item Two test files are now skipped on 32-bit Windows } } \section{Changes in RcppArmadillo version 0.9.850.1.0 (2020-02-09)}{ \itemize{ \item Upgraded to Armadillo release 9.850.1 (Pyrocumulus Wrath) \itemize{ \item faster handling of compound expressions by \code{diagmat()} \item expanded \code{.save()} and \code{.load()} to handle CSV files with headers via csv_name(filename,header) specification \item added \code{log_normpdf()} \item added \code{.is_zero()} \item added \code{quantile()} } \item The sparse matrix test using scipy, if available, is now simplified thanks to recently added \CRANpkg{reticulate} conversions. 
} } \section{Changes in RcppArmadillo version 0.9.800.4.0 (2020-01-24)}{ \itemize{ \item Upgraded to Armadillo release 9.800.4 (Horizon Scraper) \itemize{ \item fixes for incorrect type promotion in \code{normpdf()} } } } \section{Changes in RcppArmadillo version 0.9.800.3.0 (2019-12-04)}{ \itemize{ \item Upgraded to Armadillo release 9.800.3 (Horizon Scraper) \itemize{ \item fixes for matrix row iterators \item better detection of non-hermitian matrices by \code{eig_sym()}, \code{inv_sympd()}, \code{chol()}, \code{expmat_sym()} } \item The \code{sample} function passes the prob vector as const allowing subsequent calls (Christian Gunning in \ghpr{276} fixing \ghit{275}) } } \section{Changes in RcppArmadillo version 0.9.800.1.0 (2019-10-09)}{ \itemize{ \item Upgraded to Armadillo release 9.800 (Horizon Scraper) \itemize{ \item faster \code{solve()} in default operation; iterative refinement is no longer applied by default; use \code{solve_opts::refine} to explicitly enable refinement \item faster \code{expmat()} \item faster handling of triangular matrices by \code{rcond()} \item added \code{.front()} and \code{.back()} \item added \code{.is_trimatu()} and \code{.is_trimatl()} \item added \code{.is_diagmat()} } \item The package now uses \pkg{tinytest} for unit tests (Dirk in \ghpr{269}). \item The \code{configure.ac} script is now more careful about shell portability (Min Kim in \ghpr{270}). 
} } \section{Changes in RcppArmadillo version 0.9.700.2.0 (2019-09-01)}{ \itemize{ \item Upgraded to Armadillo release 9.700.2 (Gangster Democracy) \itemize{ \item faster handling of cubes by \code{vectorise()} \item faster handling of sparse matrices by \code{nonzeros()} \item faster row-wise \code{index_min()} and \code{index_max()} \item expanded \code{join_rows()} and \code{join_cols()} to handle joining up to 4 matrices \item expanded \code{.save()} and \code{.load()} to allow storing sparse matrices in CSV format \item added \code{randperm()} to generate a vector with random permutation of a sequence of integers } \item Expanded the list of known good \code{gcc} and \code{clang} versions in \code{configure.ac} } } \section{Changes in RcppArmadillo version 0.9.600.4.0 (2019-07-14)}{ \itemize{ \item Upgraded to Armadillo release 9.600.4 (Napa Invasion) \itemize{ \item faster handling of sparse submatrices \item faster handling of sparse diagonal views \item faster handling of sparse matrices by \code{symmatu()} and \code{symmatl()} \item faster handling of sparse matrices by \code{join_cols()} \item expanded \code{clamp()} to handle sparse matrices \item added \code{.clean()} to replace elements below a threshold with zeros } } } \section{Changes in RcppArmadillo version 0.9.500.2.0 (2019-06-11)}{ \itemize{ \item Upgraded to Armadillo release 9.500.2 (Riot Compact) \itemize{ \item Expanded \code{solve()} with \code{solve_opts::likely_sympd} to indicate that the given matrix is likely positive definite \item more robust automatic detection of positive definite matrices by \code{solve()} and \code{inv()} \item faster handling of sparse submatrices \item expanded \code{eigs_sym()} to print a warning if the given matrix is not symmetric \item extended LAPACK function prototypes to follow Fortran passing conventions for so-called "hidden arguments", in order to address GCC Bug 90329; to use previous LAPACK function prototypes without the "hidden arguments", 
\code{#define ARMA_DONT_USE_FORTRAN_HIDDEN_ARGS before #include <armadillo> } } } } \section{Changes in RcppArmadillo version 0.9.400.3.0 (2019-05-09)}{ \itemize{ \item Upgraded to Armadillo release 9.400.3 (Surrogate Miscreant) \itemize{ \item check for symmetric / hermitian matrices (used by decomposition functions) has been made more robust \item \code{linspace()} and \code{logspace()} now honour requests for generation of vectors with zero elements \item fix for vectorisation / flattening of complex sparse matrices } } } \section{Changes in RcppArmadillo version 0.9.400.2.0 (2019-04-28)}{ \itemize{ \item Upgraded to Armadillo release 9.400.2 (Surrogate Miscreant) \itemize{ \item faster \code{cov()} and \code{cor()} \item added \code{.as_col()} and \code{.as_row()} \item expanded \code{.shed_rows()} / \code{.shed_cols()} / \code{.shed_slices()} to remove rows/columns/slices specified in a vector \item expanded \code{vectorise()} to handle sparse matrices \item expanded element-wise versions of \code{max()} and \code{min()} to handle sparse matrices \item optimised handling of sparse matrix expressions: \code{sparse \% (sparse +- scalar)} and \code{sparse / (sparse +- scalar)} \item expanded \code{eig_sym()}, \code{chol()}, \code{expmat_sym()}, \code{logmat_sympd()}, \code{sqrtmat_sympd()}, \code{inv_sympd()} to print a warning if the given matrix is not symmetric \item more consistent detection of vector expressions } } } \section{Changes in RcppArmadillo version 0.9.300.2.0 (2019-03-21)}{ \itemize{ \item Upgraded to Armadillo release 9.300.2 (Fomo Spiral) \itemize{ \item Faster handling of compound complex matrix expressions by \code{trace()} \item More efficient handling of element access for inplace modifications in sparse matrices \item Added \code{.is_sympd()} to check whether a matrix is symmetric/hermitian positive definite \item Added \code{interp2()} for 2D data interpolation \item Added \code{expm1()} and \code{log1p()} \item Expanded 
\code{.is_sorted()} with options "strictascend" and "strictdescend" \item Expanded \code{eig_gen()} to optionally perform balancing prior to decomposition } } } \section{Changes in RcppArmadillo version 0.9.200.7.1 (2019-03-08)}{ \itemize{ \item Explicit setting of \code{RNGversion("3.5.0")} in one unit test to accomodate the change in \code{sample()} in R 3.6.0 \item Back-ported a fix to the Wishart RNG from upstream (Dirk in \ghpr{248} fixing \ghit{247}) } } \section{Changes in RcppArmadillo version 0.9.200.7.0 (2019-01-17)}{ \itemize{ \item Upgraded to Armadillo release 9.200.7 (Carpe Noctem) \item Fixes in 9.200.7 compared to 9.200.5: \itemize{ \item handling complex compound expressions by \code{trace()} \item handling \code{.rows()} and \code{.cols()} by the \code{Cube} class } } } \section{Changes in RcppArmadillo version 0.9.200.5.0 (2018-11-27)}{ \itemize{ \item Upgraded to Armadillo release 9.200.5 (Carpe Noctem) \item Changes in this release \itemize{ \item linking issue when using fixed size matrices and vectors \item faster handling of common cases by \code{princomp()} } } } \section{Changes in RcppArmadillo version 0.9.200.4.0 (2018-11-09)}{ \itemize{ \item Upgraded to Armadillo release 9.200.4 (Carpe Noctem) \itemize{ \item faster handling of symmetric positive definite matrices by \code{rcond()} \item faster transpose of matrices with size ≥ 512x512 \item faster handling of compound sparse matrix expressions by \code{accu()}, \code{diagmat()}, \code{trace()} \item faster handling of sparse matrices by \code{join_rows()} \item expanded \code{sign()} to handle scalar arguments \item expanded operators (\code{*}, \code{\%}, \code{+}, \code{−}) to handle sparse matrices with differing element types (eg. 
multiplication of complex matrix by real matrix) \item expanded \code{conv_to()} to allow conversion between sparse matrices with differing element types \item expanded \code{solve()} to optionally allow keeping solutions of systems singular to working precision \item workaround for \code{gcc} and \code{clang} bug in C++17 mode } \item Commented-out sparse matrix test consistently failing on the fedora-clang machine CRAN, and only there. No fix without access. \item The 'Unit test' vignette is no longer included. } } \section{Changes in RcppArmadillo version 0.9.100.5.0 (2018-08-16)}{ \itemize{ \item Upgraded to Armadillo release 9.100.4 (Armatus Ad Infinitum) \itemize{ \item faster handling of symmetric/hermitian positive definite matrices by \code{solve()} \item faster handling of \code{inv_sympd()} in compound expressions \item added \code{.is_symmetric()} \item added \code{.is_hermitian()} \item expanded \code{spsolve()} to optionally allow keeping solutions of systems singular to working precision \item new configuration options \code{ARMA_OPTIMISE_SOLVE_BAND} and \code{ARMA_OPTIMISE_SOLVE_SYMPD} \item smarter use of the element cache in sparse matrices } \item Aligned OpenMP flags in the RcppArmadillo.package.skeleton used Makevars{,.win} to not use one C and C++ flag. 
} } \section{Changes in RcppArmadillo version 0.8.600.0.0 (2018-06-28)}{ \itemize{ \item Upgraded to Armadillo release 8.600.0 (Sabretooth Rugrat) \itemize{ \item added \code{hess()} for Hessenberg decomposition \item added \code{.row()}, \code{.rows()}, \code{.col()}, \code{.cols()} to subcube views \item expanded \code{.shed_rows()} and \code{.shed_cols()} to handle cubes \item expanded \code{.insert_rows()} and \code{.insert_cols()} to handle cubes \item expanded subcube views to allow non-contiguous access to slices \item improved tuning of sparse matrix element access operators \item faster handling of tridiagonal matrices by \code{solve()} \item faster multiplication of matrices with differing element types when using OpenMP } } } \section{Changes in RcppArmadillo version 0.8.500.1.1 (2018-05-17) [GH only]}{ \itemize{ \item Upgraded to Armadillo release 8.500.1 (Caffeine Raider) \itemize{ \item bug fix for banded matrices } \item Added \code{slam} to Suggests: as it is used in two unit test functions [CRAN requests] \item The \code{RcppArmadillo.package.skeleton()} function now works with \code{example_code=FALSE} when \CRANpkg{pkgKitten} is present (Santiago Olivella in \ghpr{231} fixing \ghpr{229}) \item The LAPACK tests now cover band matrix solvers (Keith O'Hara in \ghpr{230}). 
} } \section{Changes in RcppArmadillo version 0.8.500.0 (2018-04-21)}{ \itemize{ \item Upgraded to Armadillo release 8.500 (Caffeine Raider) \itemize{ \item faster handling of sparse matrices by \code{kron()} and \code{repmat()} \item faster transpose of sparse matrices \item faster element access in sparse matrices \item faster row iterators for sparse matrices \item faster handling of compound expressions by \code{trace()} \item more efficient handling of aliasing in submatrix views \item expanded \code{normalise()} to handle sparse matrices \item expanded \code{.transform()} and \code{.for_each()} to handle sparse matrices \item added \code{reverse()} for reversing order of elements \item added \code{repelem()} for replicating elements \item added \code{roots()} for finding the roots of a polynomial } \item Fewer LAPACK compile-time guards are used, new unit tests for underlying features have been added (Keith O'Hara in \ghpr{211} addressing \ghit{207}). \item The configure check for LAPACK features has been updated accordingly (Keith O'Hara in \ghpr{214} addressing \ghit{213}). \item The compile-time check for \code{g++} is now more robust to minimal shell versions (\ghpr{217} addressing \ghit{216}). \item Compiler tests to were added for macOS (Keith O'Hara in \ghpr{219}). 
} } \section{Changes in RcppArmadillo version 0.8.400.0.0 (2018-02-19)}{ \itemize{ \item Upgraded to Armadillo release 8.400.0 (Entropy Bandit) \itemize{ \item faster handling of sparse matrices by \code{repmat()} \item faster loading of CSV files \item expanded \code{kron()} to handle sparse matrices \item expanded \code{index_min()} and \code{index_max()} to handle cubes \item expanded \code{randi()}, \code{randu()}, \code{randn()}, \code{randg()} to output single scalars \item added submatrix & subcube iterators \item added \code{normcdf()} \item added \code{mvnrnd()} \item added \code{chi2rnd()} \item added \code{wishrnd()} and \code{iwishrnd()} } \item The \code{configure} generated header settings for LAPACK and OpenMP can be overridden by the user. \item This release was preceded by two release candidates which were tested extensively. } } \section{Changes in RcppArmadillo version 0.8.300.1.0 (2017-12-04)}{ \itemize{ \item Upgraded to Armadillo release 8.300.1 (Tropical Shenanigans) \itemize{ \item faster handling of band matrices by \code{solve()} \item faster handling of band matrices by \code{chol()} \item faster \code{randg()} when using OpenMP \item added \code{normpdf()} \item expanded \code{.save()} to allow appending new datasets to existing HDF5 files } \item Includes changes made in several earlier GitHub-only releases (versions 0.8.300.0.0, 0.8.200.2.0 and 0.8.200.1.0). \item Conversion from \code{simple_triplet_matrix} is now supported (Serguei Sokol in \ghpr{192}). \item Updated configure code to check for g++ 5.4 or later to enable OpenMP. 
\item Updated the skeleton package to current packaging standards \item Suppress warnings from Armadillo about missing OpenMP support and \code{-fopenmp} flags by setting \code{ARMA_DONT_PRINT_OPENMP_WARNING} } } \section{Changes in RcppArmadillo version 0.8.100.1.0 (2017-10-10)}{ \itemize{ \item Upgraded to Armadillo release 8.100.1 (Feral Pursuits) \itemize{ \item faster incremental construction of sparse matrices via element access operators \item faster diagonal views in sparse matrices \item expanded \code{SpMat} to save/load sparse matrices in coord format \item expanded \code{.save()},\code{.load()} to allow specification of datasets within HDF5 files \item added \code{affmul()} to simplify application of affine transformations \item warnings and errors are now printed by default to the \code{std::cerr} stream \item added \code{set_cerr_stream()} and \code{get_cerr_stream()} to replace \code{set_stream_err1()}, \code{set_stream_err2()}, \code{get_stream_err1()}, \code{get_stream_err2()} \item new configuration options \code{ARMA_COUT_STREAM} and \code{ARMA_CERR_STREAM} } \item Constructors for sparse matrices of types \code{dgt}, \code{dtt} amd \code{dst} now use Armadillo code for improved performance (Serguei Sokol in \ghpr{175} addressing \ghit{173}) \item Sparse matrices call \code{.sync()} before accessing internal arrays (Binxiang Ni in \ghpr{171}) \item The sparse matrix vignette has been converted to Rmarkdown using the pinp package, and is now correctly indexed. (\ghpr{176}) } } \section{Changes in RcppArmadillo version 0.7.960.1.2 (2017-08-29)}{ \itemize{ \item On macOS, OpenMP support is now turned off (\ghpr{170}). \item The package is now compiling under the C++11 standard (\ghpr{170}). 
\item The vignette dependency are correctly set (James and Dirk in \ghpr{168} and \ghpr{169}) } } \section{Changes in RcppArmadillo version 0.7.960.1.1 (2017-08-20)}{ \itemize{ \item Added improved check for inherited S4 matrix classes (\ghpr{162} fixing \ghit{161}) \item Changed \code{fastLm} C++ function to \code{fastLm_impl} to not clash with R method (\ghpr{164} fixing \ghpr{163}) \item Added OpenMP check for \code{configure} (\ghpr{166} fixing \ghit{165}) } } \section{Changes in RcppArmadillo version 0.7.960.1.0 (2017-08-11)}{ \itemize{ \item Upgraded to Armadillo release 7.960.1 (Northern Banana Republic Deluxe) \itemize{ \item faster \code{randn()} when using OpenMP (NB: usually omitted when used fromR) \item faster \code{gmm_diag} class, for Gaussian mixture models with diagonal covariance matrices \item added \code{.sum_log_p()} to the \code{gmm_diag} class \item added \code{gmm_full} class, for Gaussian mixture models with full covariance matrices \item expanded \code{.each_slice()} to optionally use OpenMP for multi-threaded execution } \item Upgraded to Armadillo release 7.950.0 (Northern Banana Republic) \itemize{ \item expanded \code{accu()} and \code{sum()} to use OpenMP for processing expressions with computationally expensive element-wise functions \item expanded \code{trimatu()} and \code{trimatl()} to allow specification of the diagonal which delineates the boundary of the triangular part } \item Enhanced support for sparse matrices (Binxiang Ni as part of Google Summer of Code 2017) \itemize{ \item Add support for \code{dtCMatrix} and \code{dsCMatrix} (\ghpr{135}) \item Add conversion and unit tests for \code{dgT}, \code{dtT} and \code{dsTMatrix} (\ghpr{136}) \item Add conversion and unit tests for \code{dgR}, \code{dtR} and \code{dsRMatrix} (\ghpr{139}) \item Add conversion and unit tests for \code{pMatrix} and \code{ddiMatrix} (\ghpr{140}) \item Rewrite conversion for \code{dgT}, \code{dtT} and \code{dsTMatrix}, and add file-based tests 
(\ghpr{142}) \item Add conversion and unit tests for \code{indMatrix} (\ghpr{144}) \item Rewrite conversion for \code{ddiMatrix} (\ghpr{145}) \item Add a warning message for matrices that cannot be converted (\ghpr{147}) \item Add new vignette for sparse matrix support (\ghpr{152}; Dirk in \ghpr{153}) \item Add support for sparse matrix conversion from Python SciPy (\ghpr{158} addressing \ghit{141}) } \item Optional return of row or column vectors in collapsed form if appropriate \code{#define} is set (Serguei Sokol in \ghpr{151} and \ghpr{154}) \item Correct \code{speye()} for non-symmetric cases (Qiang Kou in \ghpr{150} closing \ghit{149}). \item Ensure tests using Scientific Python and reticulate are properly conditioned on the packages being present. \item Added \code{.aspell/} directory with small local directory now supported by R-devel. } } \section{Changes in RcppArmadillo version 0.7.900.2.0 (2017-06-02)}{ \itemize{ \item Upgraded to Armadillo release 7.900.2 (Evil Banana Republic) \itemize{ \item Expanded \code{clamp()} to handle cubes \item Computationally expensive element-wise functions (such as \code{exp()}, \code{log()}, \code{cos()}, etc) can now be automatically sped up via OpenMP; this requires a C++11/C++14 compiler with OpenMP 3.0+ support for GCC and clang compilers \item One caveat: when using GCC, use of \code{-march=native} in conjunction with \code{-fopenmp} may lead to speed regressions on recent processors } \item Added gcc 7 to support compiler check (James Balamuta in \ghpr{128} addressing \ghit{126}). \item A unit test helper function for \code{rmultinom} was corrected (\ghpr{133}). 
\item OpenMP support was added to the skeleton helper in \code{inline.R} } } \section{Changes in RcppArmadillo version 0.7.800.2.0 (2017-04-12)}{ \itemize{ \item Upgraded to Armadillo release 7.800.2 (Rogue State Redux) \itemize{ \item The Armadillo license changed to Apache License 2.0 } \item The \code{DESCRIPTION} file now mentions the Apache License 2.0, as well as the former MPL2 license used for earlier releases. \item A new file \code{init.c} was added with calls to \code{R_registerRoutines()} and \code{R_useDynamicSymbols()} \item Symbol registration is enabled in \code{useDynLib} \item The \code{fastLm} example was updated } } \section{Changes in RcppArmadillo version 0.7.700.0.0 (2017-02-07)}{ \itemize{ \item Upgraded to Armadillo release 7.700.0 (Rogue State) \itemize{ \item added \code{polyfit()} and \code{polyval()} \item added second form of \code{log_det()} to directly return the result as a complex number \item added \code{range()} to statistics functions \item expanded \code{trimatu()}/\code{trimatl()} and \code{symmatu()}/\code{symmatl()} to handle sparse matrice } } } \section{Changes in RcppArmadillo version 0.7.600.2.0 (2017-01-05)}{ \itemize{ \item Upgraded to Armadillo release 7.600.2 (Coup d'Etat Deluxe) \itemize{ \item Bug fix to memory allocation for \code{fields} } } } \section{Changes in RcppArmadillo version 0.7.600.1.0 (2016-12-16)}{ \itemize{ \item Upgraded to Armadillo release 7.600.1 (Coup d'Etat Deluxe) \itemize{ \item more accurate \code{eigs_sym()} and \code{eigs_gen()} \item expanded \code{floor()}, \code{ceil()}, \code{round()}, \code{trunc()}, \code{sign()} to handle sparse matrices \item added \code{arg()}, \code{atan2()}, \code{hypot()} } } } \section{Changes in RcppArmadillo version 0.7.500.1.0 (2016-11-11)}{ \itemize{ \item Upgraded to Armadillo release 7.500.1 \item Small improvement to return value treatment \item The \code{sample.h} extension was updated to the newer Armadillo interface. 
(Closes \ghit{111}) } } \section{Changes in RcppArmadillo version 0.7.500.0.0 (2016-10-20)}{ \itemize{ \item Upgraded to Armadillo release 7.500.0 (Coup d'Etat) \itemize{ \item Expanded \code{qz()} to optionally specify ordering of the Schur form \item Expanded \code{each_slice()} to support matrix multiplication } } } \section{Changes in RcppArmadillo version 0.7.400.2.0 (2016-08-24)}{ \itemize{ \item Upgraded to Armadillo release 7.400.2 (Feral Winter Deluxe) \itemize{ \item added \code{expmat_sym()}, \code{logmat_sympd()}, \code{sqrtmat_sympd()} \item added \code{.replace()} } } } \section{Changes in RcppArmadillo version 0.7.300.1.0 (2016-07-30)}{ \itemize{ \item Upgraded to Armadillo release 7.300.1 \itemize{ \item added \code{index_min()} and \code{index_max()} standalone functions \item expanded \code{.subvec()} to accept \code{size()} arguments \item more robust handling of non-square matrices by \code{lu()} } } } \section{Changes in RcppArmadillo version 0.7.200.2.0 (2016-07-22)}{ \itemize{ \item Upgraded to Armadillo release 7.200.2 \item The sampling extension was rewritten to use Armadillo vector types instead of Rcpp types (PR \ghpr{101} by James Balamuta) } } \section{Changes in RcppArmadillo version 0.7.200.1.0 (2016-06-06)}{ \itemize{ \item Upgraded to Armadillo release 7.200.1 \itemize{ \item added \code{.index_min()} and \code{.index_max()} \item expanded \code{ind2sub()} to handle vectors of indices \item expanded \code{sub2ind()} to handle matrix of subscripts \item expanded \code{expmat()}, \code{logmat()} and \code{sqrtmat()} to optionally return a bool indicating success \item faster handling of compound expressions by \code{vectorise()} } \item The \code{configure} code now (once again) sets the values for the LAPACK feature \code{#define} correctly. 
} } \section{Changes in RcppArmadillo version 0.7.100.3.0 (2016-05-25)}{ \itemize{ \item Upgraded to Armadillo test release 7.100.3 \itemize{ \item added \code{erf()}, \code{erfc()}, \code{lgamma()} \item added \code{.head_slices()} and \code{.tail_slices()} to subcube views \item \code{spsolve()} now requires SuperLU 5.2 \item \code{eigs_sym()}, \code{eigs_gen()} and \code{svds()} now use a built-in reimplementation of ARPACK for real (non-complex) matrices (code contributed by Yixuan Qiu) } \item The \code{configure} code now checks against old \code{g++} version which are no longer sufficient to build the package. } } \section{Changes in RcppArmadillo version 0.6.700.6.0 (2016-05-05)}{ \itemize{ \item Upgraded to Armadillo 6.700.6 (Catabolic Amalgamator Deluxe) \itemize{ \item fix for handling empty matrices by \code{kron()} \item fix for clang warning in advanced matrix constructors \item fix for false deprecated warning in \code{trunc_log()} and \code{trunc_exp()} \item fix for gcc-6.1 warning about misleading indentation \item corrected documentation for the \code{solve()} function } \item Added support for \code{int64_t} (\code{ARMA_64BIT_WORD}) when required during compilation time. (PR \ghpr{90} by George G. Vega Yon, fixing \ghpr{88}) \item Fixed bug in \code{SpMat} exporter (PR \ghpr{91} by George G. 
Vega Yon, fixing \ghit{89} and \ghit{72}) } } \section{Changes in RcppArmadillo version 0.6.700.3.0 (2016-04-05)}{ \itemize{ \item Upgraded to Armadillo 6.700.3 (Catabolic Amalgamator Deluxe) \itemize{ \item added \code{logmat()} for calculating the matrix logarithm \item added \code{regspace()} for generating vectors with regularly spaced elements \item added \code{logspace()} for generating vectors with logarithmically spaced elements \item added \code{approx_equal()} for determining approximate equality \item added \code{trapz()} for numerical integration \item expanded \code{.save()} and \code{.load()} with hdf5_binary_trans file type, to save/load data with columns transposed to rows } } } \section{Changes in RcppArmadillo version 0.6.600.4.0 (2016-03-15)}{ \itemize{ \item Upgraded to Armadillo 6.600.4 (Catabolic Amalgamator) \itemize{ \item expanded \code{sum()}, \code{mean()}, \code{min()}, \code{max()} to handle cubes \item expanded \code{Cube} class to handle arbitrarily sized empty cubes (eg. 
0x5x2) \item added \code{shift()} for circular shifts of elements \item added \code{sqrtmat()} for finding the square root of a matrix \item fix for \code{gmm_diag} when using Mahalanobis distance } \item The \code{configure} script now reflects the full LAPACK fallback offered by R 3.3.0 or later (PR \ghpr{81}) } } \section{Changes in RcppArmadillo version 0.6.500.4.0 (2016-01-26)}{ \itemize{ \item Upgraded to Armadillo 6.500.4 (Gourmet Electron Jumper) \itemize{ \item added \code{conv2()} for 2D convolution \item added stand-alone \code{kmeans()} function for clustering data \item added \code{trunc()} \item extended \code{conv()} to optionally provide central convolution \item faster handling of multiply-and-accumulate by \code{accu()} when using Intel MKL, ATLAS or OpenBLAS } \item The \code{configure} script now uses \code{#!/usr/bin/env bash} to cope with systems that do not have \code{#!/bin/bash} (PR \ghpr{75} fixing issue \ghpr{74}) \item RcppArmadillo now defines ARMA_32BIT_WORD to ensure we always use integer vectors that can be passed to R } } \section{Changes in RcppArmadillo version 0.6.400.2.0 (2015-12-15)}{ \itemize{ \item Upgraded to Armadillo 6.400.2 ("Flying Spaghetti Monster Deluxe") \itemize{ \item expanded \code{each_col()}, \code{each_row()} and \code{each_slice()} to handle C++11 lambda functions \item added \code{ind2sub()} and \code{sub2ind()} \item fixes for corner cases in gmm_diag class } } } \section{Changes in RcppArmadillo version 0.6.300.2.2 (2015-12-12)}{ \itemize{ \item Upgraded to Armadillo 6.300.3-test ("Flying Spaghetti Monster") \itemize{ \item Additional test in \code{auxlib_meat.hpp} for limited LAPACK } \item Updated test and \code{#define} for limited LAPACK version R might be built with on Unix-alike systems } } \section{Changes in RcppArmadillo version 0.6.300.2.0 (2015-12-03)}{ \itemize{ \item Upgraded to Armadillo 6.300.2 ("Flying Spaghetti Monster") \itemize{ \item expanded \code{solve()} to find approximate solutions for 
rank-deficient systems \item faster handling of non-contiguous submatrix views in compound expressions \item added \code{.for_each()} to Mat, Row, Col, Cube and field classes \item added \code{rcond()} for estimating the reciprocal condition number \item fixes for \code{spsolve()}, \code{eigs_sym()}, \code{eigs_gen()}, \code{svds()} } \item Added support for \code{Cube} types via \code{as<>} converters (PR \ghpr{64} by Nathan Russell, fixing \ghit{63} and \ghit{42}) } } \section{Changes in RcppArmadillo version 0.6.200.2.0 (2015-10-31)}{ \itemize{ \item Upgraded to Armadillo 6.200.0 ("Midnight Blue Deluxe") \itemize{ \item expanded \code{diagmat()} to handle non-square matrices and arbitrary diagonals \item expanded \code{trace()} to handle non-square matrices } } } \section{Changes in RcppArmadillo version 0.6.100.0.0 (2015-10-03)}{ \itemize{ \item Upgraded to Armadillo 6.100.0 ("Midnight Blue") \itemize{ \item faster \code{norm()} and \code{normalise()} when using ATLAS or OpenBLAS \item added Schur decomposition: \code{schur()} \item stricter handling of matrix objects by \code{hist()} and \code{histc()} \item advanced constructors for using auxiliary memory by Mat, Col, Row and Cube now have the default of \emph{strict = false} \item Cube class now delays allocation of .slice() related structures until needed \item expanded \code{join_slices()} to handle joining cubes with matrices } } } \section{Changes in RcppArmadillo version 0.6.000.1.0 (2015-09-25)}{ \itemize{ \item Upgraded to Armadillo test / bug-fix release 0.6.000.1-test \item Non-CRAN release } } \section{Changes in RcppArmadillo version 0.5.600.2.0 (2015-09-19)}{ \itemize{ \item Upgraded to Armadillo 5.600.2 ("Molotov Cocktail Deluxe") \itemize{ \item expanded \code{.each_col()} and \code{.each_row()} to handle out-of-place operations \item added \code{.each_slice()} for repeated matrix operations on each slice of a cube \item faster handling of compound expressions by \code{join_rows()} and 
\code{join_cols()} } } } \section{Changes in RcppArmadillo version 0.5.500.2.0 (2015-09-03)}{ \itemize{ \item Upgraded to Armadillo 5.500.2 ("Molotov Cocktail") \itemize{ \item expanded object constructors and generators to handle \code{size()} based specification of dimensions \item faster handling of submatrix rows \item faster \code{clamp()} \item fixes for handling sparse matrices } } } \section{Changes in RcppArmadillo version 0.5.400.2.0 (2015-08-17)}{ \itemize{ \item Upgraded to Armadillo 5.400.2 ("Plutocracy Incorporated Deluxe") \itemize{ \item added \code{find_unique()} for finding indices of unique values \item added \code{diff()} for calculating differences between consecutive elements \item added \code{cumprod()} for calculating cumulative product \item added \code{null()} for finding the orthonormal basis of null space \item expanded \code{interp1()} to handle repeated locations \item expanded \code{unique()} to handle complex numbers \item faster \code{flipud()} \item faster row-wise \code{cumsum()} \item fix for k-means clustering in gmm_diag class } \item corrected use of \code{kitten()} thanks to Grant Brown } } \section{Changes in RcppArmadillo version 0.5.300.4 (2015-08-03)}{ \itemize{ \item Upgraded to Armadillo 5.300.4 ("Plutocracy Incorporated") \itemize{ \item added generalised Schur decomposition: \code{qz()} \item added \code{.has_inf()} and \code{.has_nan()} \item expanded \code{interp1()} to handle out-of-domain locations \item expanded sparse matrix class with \code{.set_imag()} and \code{.set_real()} \item expanded \code{imag()}, \code{real()} and \code{conj()} to handle sparse matrices \item expanded \code{diagmat()}, \code{reshape()} and \code{resize()} to handle sparse matrices \item faster sparse \code{sum()} \item faster row-wise \code{sum()}, \code{mean()}, \code{min()}, \code{max()} \item updated physical constants to NIST 2014 CODATA values \item fixes for handling sparse submatrix views \item Armadillo can make use of GPUs by 
linking with NVIDIA NVBLAS (a GPU-accelerated implementation of BLAS), or by linking with AMD ACML (which can use GPUs via OpenCL) } \item Added \code{importFrom} statements for R functions not from base \item Added explicit \code{Rcpp::sourceCpp()} reference as well \item Updated one formatting detail in vignette to please TeXlive2015 } } \section{Changes in RcppArmadillo version 0.5.200.1.0 (2015-06-04)}{ \itemize{ \item Upgraded to Armadillo release 5.200.1 ("Boston Tea Smuggler") \itemize{ \item added \code{orth()} for finding the orthonormal basis of the range space of a matrix \item expanded element initialisation to handle nested initialiser lists (C++11) \item workarounds for bugs in GCC, Intel and MSVC C++ compilers } \item Added another example to \code{inst/examples/fastLm.r} } } \section{Changes in RcppArmadillo version 0.5.100.2.0 (2015-05-12)}{ \itemize{ \item Upgraded to Armadillo test / bug-fix release 5.100.2 \item Non-CRAN release } } \section{Changes in RcppArmadillo version 0.5.100.1.0 (2015-05-01)}{ \itemize{ \item Upgraded to Armadillo release 5.100.1 ("Ankle Biter Deluxe") \itemize{ \item added \code{interp1()} for 1D interpolation \item added \code{.is_sorted()} for checking whether a vector or matrix has sorted elements \item updated physical constants to NIST 2010 CODATA values } } } \section{Changes in RcppArmadillo version 0.5.000.0 (2015-04-12)}{ \itemize{ \item Upgraded to Armadillo release Version 5.000 ("Ankle Biter") \itemize{ \item added \code{spsolve()} for solving sparse systems of linear equations \item added \code{svds()} for singular value decomposition of sparse matrices \item added \code{nonzeros()} for extracting non-zero values from matrices \item added handling of diagonal views by sparse matrices \item expanded \code{repmat()} to handle sparse matrices \item expanded \code{join_rows()} and \code{join_cols()} to handle sparse matrices \item \code{sort_index()} and \code{stable_sort_index()} have been placed in the delayed 
operations framework for increased efficiency \item use of 64 bit integers is automatically enabled when using a C++11 compiler \item workaround for a bug in recent releases of Apple Xcode \item workaround for a bug in LAPACK 3.5 } } } \section{Changes in RcppArmadillo version 0.4.999.1.0 (2015-04-04)}{ \itemize{ \item Upgraded to Armadillo release preview 4.999.1 \item Non-CRAN test release } } \section{Changes in RcppArmadillo version 0.4.650.1.1 (2015-02-25)}{ \itemize{ \item Upgraded to Armadillo release Version 4.650.1 ("Intravenous Caffeine Injector") \itemize{ \item added \code{randg()} for generating random values from gamma distributions (C++11 only) \item added \code{.head_rows()} and \code{.tail_rows()} to submatrix views \item added \code{.head_cols()} and \code{.tail_cols()} to submatrix views \item expanded \code{eigs_sym()} to optionally calculate eigenvalues with smallest/largest algebraic values \item fixes for handling of sparse matrices } \item Applied small correction to main header file to set up C++11 RNG whether or not the alternate RNG (based on R, our default) is used } } \section{Changes in RcppArmadillo version 0.4.600.4.0 (2015-01-23)}{ \itemize{ \item Upgraded to Armadillo release Version 4.600.4 (still "Off The Reservation") \itemize{ \item Speedups in the transpose operation \item Small bug fixes } } } \section{Changes in RcppArmadillo version 0.4.600.0 (2014-12-27)}{ \itemize{ \item Upgraded to Armadillo release Version 4.600 ("Singapore Sling Deluxe") \itemize{ \item added \code{.head()} and \code{.tail()} to submatrix views \item faster matrix transposes within compound expressions \item faster \code{accu()} and \code{norm()} when compiling with -O3 -ffast-math -march=native (gcc and clang) \item workaround for a bug in GCC 4.4 } } } \section{Changes in RcppArmadillo version 0.4.550.2.0 (2014-12-02)}{ \itemize{ \item Upgraded to Armadillo release Version 4.550.2 ("Singapore Sling Deluxe") \itemize{ \item Bug fixes for implicit template 
instantiation for \code{std::pow()} seen with the old g++ 4.4* series } } } \section{Changes in RcppArmadillo version 0.4.550.1.0 (2014-11-26)}{ \itemize{ \item Upgraded to Armadillo release Version 4.550.1 ("Singapore Sling Deluxe") \itemize{ \item added matrix exponential function: \code{expmat()} \item faster \code{.log_p()} and \code{.avg_log_p()} functions in the \code{gmm_diag} class when compiling with OpenMP enabled \item faster handling of in-place addition/subtraction of expressions with an outer product \item applied correction to \code{gmm_diag} relative to the 4.550 release } \item The Armadillo Field type is now converted in \code{as<>} conversions } } \section{Changes in RcppArmadillo version 0.4.500.0 (2014-10-30)}{ \itemize{ \item Upgraded to Armadillo release Version 4.500 ("Singapore Sling") \itemize{ \item faster handling of complex vectors by \code{norm()} \item expanded \code{chol()} to optionally specify output matrix as upper or lower triangular \item better handling of non-finite values when saving matrices as text files } \item The \code{sample} functionality has been extended to provide the Walker Alias method (including new unit tests) via a pull request by Christian Gunning } } \section{Changes in RcppArmadillo version 0.4.450.1.0 (2014-09-21)}{ \itemize{ \item Upgraded to Armadillo release Version 4.450.1 (Spring Hill Fort) \itemize{ \item faster handling of matrix transposes within compound expressions \item expanded \code{symmatu()}/\code{symmatl()} to optionally disable taking the complex conjugate of elements \item expanded \code{sort_index()} to handle complex vectors \item expanded the \code{gmm_diag} class with functions to generate random samples } \item A new random-number implementation for Armadillo uses the RNG from R as a fallback (when C++11 is not selected so the C++11-based RNG is unavailable) which avoids using the older C++98-based \code{std::rand} \item The \code{RcppArmadillo.package.skeleton()} function was updated to 
only set an "Imports:" for Rcpp, but not RcppArmadillo which (as a template library) needs only LinkingTo: \item The \code{RcppArmadillo.package.skeleton()} function will now prefer \code{pkgKitten::kitten()} over \code{package.skeleton()} in order to create a working package which passes \code{R CMD check}. \item The \CRANpkg{pkgKitten} package is now a \code{Suggests:} \item A manual page was added to provide documentation for the functions provided by the skeleton package. \item A small update was made to the package manual page. } } \section{Changes in RcppArmadillo version 0.4.400.0 (2014-08-19)}{ \itemize{ \item Upgraded to Armadillo release Version 4.400 (Winter Shark Alley) \itemize{ \item added \code{gmm_diag} class for statistical modelling using Gaussian Mixture Models; includes multi-threaded implementation of k-means and Expectation-Maximisation for parameter estimation \item added \code{clamp()} for clamping values to be between lower and upper limits \item expanded batch insertion constructors for sparse matrices to add values at repeated locations \item faster handling of subvectors by \code{dot()} \item faster handling of aliasing by submatrix views } \item Corrected a bug (found by the g++ Address Sanitizer) in sparse matrix initialization where space for a sentinel was allocated, but the sentinel was not set; with extra thanks to Ryan Curtin for help \item Added a few unit tests for sparse matrices } } \section{Changes in RcppArmadillo version 0.4.320.0 (2014-07-03)}{ \itemize{ \item Upgraded to Armadillo release Version 4.320 (Daintree Tea Raider) \itemize{ \item expanded \code{eigs_sym()} and \code{eigs_gen()} to use an optional tolerance parameter \item expanded \code{eig_sym()} to automatically fall back to standard decomposition method if divide-and-conquer fails \item automatic installer enables use of C++11 random number generator when using gcc 4.8.3+ in C++11 mode } } } \section{Changes in RcppArmadillo version 0.4.300.8.0 (2014-05-31)}{ 
\itemize{ \item Upgraded to Armadillo release Version 4.300.8 (Medieval Cornea Scraper) \itemize{ \item More robust \code{norm}-related functions \item Fixes for interactions between \code{cube} and \code{vector} types. } \item Adds a \code{#define ARMA_DONT_USE_CXX11} to provide an option to turn C++11 off for Armadillo (but client packages may still use it) \item More robust Windows detection by using \code{_WIN32} as well as \code{WIN32} as the latter gets disabled by MinGW with C++11 \item On Windows, C++11 is turned off as the Armadillo code base uses more features of C++11 than g++ 4.6.2 version in Rtools implements } } \section{Changes in RcppArmadillo version 0.4.300.5.0 (2014-05-19)}{ \itemize{ \item Upgraded to Armadillo release Version 4.300.5 (Medieval Cornea Scraper) \itemize{ \item Handle possible underflows and overflows in \code{norm()}, \code{normalise()}, \code{norm_dot()} \item Fix for handling of null vectors by \code{norm_dot()} } } } \section{Changes in RcppArmadillo version 0.4.300.2.0 (2014-05-13)}{ \itemize{ \item Upgraded to Armadillo release Version 4.300.2 (Medieval Cornea Scraper) \itemize{ \item faster \code{find()} } } } \section{Changes in RcppArmadillo version 0.4.300.0 (2014-05-04)}{ \itemize{ \item Upgraded to Armadillo release Version 4.300 (Medieval Cornea Scraper) \itemize{ \item faster \code{find()} \item added \code{find_finite()} and \code{find_nonfinite()} for finding indices of finite and non-finite elements \item expressions \code{X=inv(A)*B*C} and \code{X=A.i()*B*C} are automatically converted to X=solve(A,B*C) } \item Corrected conversion to \code{unsigned int} vectors and matrices \item Configure script now checks for R version 3.0.3 or newer to enable complex divide-and-conquer SVD in case of R-supplied LAPACK } } \section{Changes in RcppArmadillo version 0.4.200.0 (2014-04-07)}{ \itemize{ \item Upgraded to Armadillo release Version 4.200 (Flintlock Swoop) \itemize{ \item faster transpose of sparse matrices \item 
more efficient handling of aliasing during matrix multiplication \item faster inverse of matrices marked as diagonal } } } \section{Changes in RcppArmadillo version 0.4.100.2 (2014-03-07)}{ \itemize{ \item Upgraded to Armadillo release Version 4.100.2 \itemize{ \item fix for handling null vectors by \code{normalise()} \item fix for memory handling by sparse matrices } \item Correct use of \code{[[ depends()]]} in skeleton example file \item Prepare \code{src/Makevars} for C++11 support from R 3.1.0 by defining \code{USE_CXX11} which is currently commented out \item In the Armadillo configuration, turn on C++11 support if \code{USE_CXX11} is defined } } \section{Changes in RcppArmadillo version 0.4.100.0 (2014-02-28)}{ \itemize{ \item Upgraded to Armadillo release Version 4.100.0 (Dirt Cruiser) \itemize{ \item added \code{normalise()} for normalising vectors to unit p-norm \item extended the \code{field} class to handle 3D layout \item extended \code{eigs_sym()} and \code{eigs_gen()} to obtain eigenvalues of various forms (eg. largest or smallest magnitude) \item automatic SIMD vectorisation of elementary expressions (eg. 
matrix addition) when using Clang 3.4+ with -O3 optimisation \item faster handling of sparse submatrix views \item workaround for a bug in LAPACK 3.4 } } } \section{Changes in RcppArmadillo version 0.4.000.4 (2014-02-19)}{ \itemize{ \item Upgraded to Armadillo release Version 4.000.4 \itemize{ \item fix for \code{randi()} generating out-of-interval values \item fix for saving field objects \item workaround for a bug in the Intel compiler } \item Updated for \CRANpkg{Rcpp} (>= 0.11.0) by removing linking step from build process, added appropriate dependency on \CRANpkg{Rcpp} \item Updated \code{RcppArmadillo.package.skeleton} function accordingly to use proper \code{NAMESPACE} import \item Rewritten \code{rcpparma_hello_world} (which is used by the \code{RcppArmadillo.package.skeleton} function) to use Rcpp Attributes, and added more examples \item Added two functions to set Armadillo's RNGs (ie the system RNG) from a given value and to randomize it, as suggested by Gábor Csárdi. Note that these do not work within RStudio (which itself also uses the same system RNG). } } \section{Changes in RcppArmadillo version 0.4.000.2 (2014-01-21)}{ \itemize{ \item Upgraded to Armadillo release Version 4.000.2 \itemize{ \item fix for \code{randi()} generating out-of-interval values \item workaround for a bug in the Intel compiler } } } \section{Changes in RcppArmadillo version 0.4.000 (2014-01-05)}{ \itemize{ \item Upgraded to Armadillo release Version 4.000 (Feral Steamroller) \itemize{ \item added eigen decompositions of sparse matrices: \code{eigs_sym()} and \code{eigs_gen()} [ but this requires linking against ARPACK which \CRANpkg{RcppArmadillo} as a pure-template package does not do, and R is not linked against ARPACK either. 
] \item added eigen decomposition for pair of matrices: \code{eig_pair()} \item added simpler forms of \code{eig_gen()} \item added condition number of matrices: \code{cond()} \item expanded \code{find()} to handle cubes \item expanded subcube views to access elements specified in a vector \item template argument for \code{running_stat_vec} expanded to accept vector types \item more robust fast inverse of 4x4 matrices \item faster divide-and-conquer decompositions are now used by default for \code{eig_sym()}, \code{pinv()}, \code{princomp()}, \code{rank()}, \code{svd()}, \code{svd_econ()} \item the form \code{inv(sympd(X))} no longer assumes that X is positive definite; use \code{inv_sympd()} instead \item added MEX connector for interfacing Octave/Matlab with Armadillo matrices (contributed by George Yammine) } } } \section{Changes in RcppArmadillo version 0.3.930.1 (2013-12-09)}{ \itemize{ \item Upgraded to Armadillo release Version 3.930.1 \itemize{ \item Armadillo falls back to standard complex svd if the more performant divide-and-conquer variant is unavailable } \item Added detection for Lapack library and distinguish between R's own version (without zgesdd) and system Lapack; a preprocessor define is set accordingly } } \section{Changes in RcppArmadillo version 0.3.930.0 (2013-12-06)}{ \itemize{ \item Upgraded to Armadillo release Version 3.930 ("Dragon's Back") \itemize{ \item added divide-and-conquer variant of \code{svd_econ()}, for faster SVD \item added divide-and-conquer variant of \code{pinv()}, for faster pseudo-inverse \item added element-wise variants of \code{min()} and \code{max()} \item added \code{size()} based specifications of submatrix view sizes \item added \code{randi()} for generating matrices with random integer values \item added more intuitive specification of sort direction in \code{sort()} and \code{sort_index()} \item added more intuitive specification of method in \code{det()}, \code{.i()}, \code{inv()} and \code{solve()} \item 
added more precise timer for the \code{wall_clock} class when using C++11 } \item New unit tests for complex matrices and vectors } } \section{Changes in RcppArmadillo version 0.3.920.3 (2013-11-20)}{ \itemize{ \item Upgraded to Armadillo release Version 3.920.3 \itemize{ \item fix for handling of tiny matrices by \code{.swap()} } } } \section{Changes in RcppArmadillo version 0.3.920.1 (2013-09-27)}{ \itemize{ \item Upgraded to Armadillo release Version 3.920.1 (Agencia Nacional Stasi) \itemize{ \item faster \code{.zeros()} \item faster \code{round()}, \code{exp2()} and \code{log2()} when using C++11 \item added signum function: \code{sign()} \item added move constructors when using C++11 \item added 2D fast Fourier transform: \code{fft2()} \item added \code{.tube()} for easier extraction of vectors and subcubes from cubes \item added specification of a fill type during construction of Mat, Col, Row and Cube classes, eg. \code{mat X(4, 5, fill::zeros)} } \item Initial implementation of \code{wrap<subview>} \item Improved implementation of \code{as<>()} and \code{wrap()} for sparse matrices \item Converted main vignette from \code{LaTeX} style \code{minted} to \code{lstlisting} which permits builds on CRAN; removed set \code{BuildVignettes: FALSE}. } } \section{Changes in RcppArmadillo version 0.3.910.0 (2013-08-12)}{ \itemize{ \item Upgraded to Armadillo release Version 3.910.0 (Pyrenees) \itemize{ \item faster multiplication of a matrix with a transpose of itself, ie. 
\code{X*X.t()} and \code{X.t()*X} \item added \code{vectorise()} for reshaping matrices into vectors \item added \code{all()} and \code{any()} for indicating presence of elements satisfying a relational condition } \item Added conversion support for sparse matrices (of type double) created by the \CRANpkg{Matrix} package as class \code{dgCMatrix} \item Moved vignette sources from \code{inst/doc} to \code{vignettes}; set \code{BuildVignettes: FALSE} as the \code{minted} mode for \code{LaTeX} upsets the CRAN builders. } } \section{Changes in RcppArmadillo version 0.3.900.7 (2013-08-02)}{ \itemize{ \item Upgraded to Armadillo release Version 3.900.7 (Bavarian Sunflower) \itemize{ \item minor fix for inplace \code{reshape()} \item minor corrections for compilation issues under GCC 4.8+ and MSVC } \item Corrected setting of \code{vec_stats} in initialization of row, vector and matrix objects \item The \pkg{inline} package is no longer used in the examples and unit tests which have all been converted to using Rcpp attributes } } \section{Changes in RcppArmadillo version 0.3.900 (2013-06-04)}{ \itemize{ \item Upgraded to Armadillo release Version 3.900.0 (Bavarian Sunflower) \itemize{ \item added automatic SSE2 vectorisation of elementary expressions (eg. 
matrix addition) when using GCC 4.7+ with -O3 optimisation \item added support for saving & loading of cubes in HDF5 format, contributed by Szabolcs Horvat \item faster \code{median()}, contributed by Ruslan Shestopalyuk \item faster handling of compound expressions with transposes of submatrix rows \item faster handling of compound expressions with transposes of complex vectors } \item Kalman filter example switched from inline to \code{sourceCpp}, which simplifies / streamlines the C++ side a little } } \section{Changes in RcppArmadillo version 0.3.820 (2013-05-12)}{ \itemize{ \item Upgraded to Armadillo release Version 3.820 (Mt Cootha) \itemize{ \item faster \code{as_scalar()} for compound expressions \item faster transpose of small vectors \item faster matrix-vector product for small vectors \item faster multiplication of small fixed size matrices } } } \section{Changes in RcppArmadillo version 0.3.810.2 (2013-04-30)}{ \itemize{ \item Upgraded to Armadillo release Version 3.810.2 \itemize{ \item minor fix for initialisation of sparse matrices } } } \section{Changes in RcppArmadillo version 0.3.810.0 (2013-04-19)}{ \itemize{ \item Upgraded to Armadillo release Version 3.810.0 (Newell Highway) \itemize{ \item added fast Fourier transform: \code{fft()} \item added handling of \code{.imbue()} and \code{.transform()} by submatrices and subcubes \item added batch insertion constructors for sparse matrices \item minor fix for multiplication of complex sparse matrices } \item Updated sample() function and test again contributed by Christian Gunning } } \section{Changes in RcppArmadillo version 0.3.800.1 (2013-03-12)}{ \itemize{ \item Upgraded to Armadillo release Version 3.800.1 (Miami Beach) \itemize{ \item workaround for a bug in ATLAS 3.8 on 64 bit systems \item faster matrix-vector multiply for small matrices } \item Added new sample() function and tests contributed by Christian Gunning \item Refactored unit testing code for faster unit test performance } } 
\section{Changes in RcppArmadillo version 0.3.800.0 (2013-03-01)}{ \itemize{ \item Upgraded to Armadillo release Version 3.800.0 (Miami Beach) \itemize{ \item Armadillo is now licensed using the Mozilla Public License 2.0 \item added \code{.imbue()} for filling a matrix/cube with values provided by a functor or lambda expression \item added \code{.swap()} for swapping contents with another matrix \item added \code{.transform()} for transforming a matrix/cube using a functor or lambda expression \item added \code{round()} for rounding matrix elements towards nearest integer \item faster \code{find()} \item fixes for handling non-square matrices by \code{qr()} and \code{qr_econ()} \item minor fixes for handling empty matrices \item reduction of pedantic compiler warnings } \item Updated vignette to paper now in press at CSDA \item Added CITATION file with reference to CSDA paper } } \section{Changes in RcppArmadillo version 0.3.6.3 (2013-02-20)}{ \itemize{ \item Upgraded to Armadillo release Version 3.6.3 \itemize{ \item faster \code{find()} \item minor fix for non-contiguous submatrix views to handle empty vectors of indices \item reduction of pedantic compiler warnings } } } \section{Changes in RcppArmadillo version 0.3.6.2 (2013-01-29)}{ \itemize{ \item Upgraded to Armadillo release Version 3.6.2 \itemize{ \item faster determinant for matrices marked as diagonal or triangular \item more fine-grained handling of 64 bit integers } \item Added a new example of a Kalman filter implementation in R, and C++ using Armadillo via RcppArmadillo, complete with timing comparison } } \section{Changes in RcppArmadillo version 0.3.6.1 (2012-12-17)}{ \itemize{ \item Upgraded to Armadillo release Version 3.6.1 (Piazza del Duomo) \itemize{ \item faster \code{trace()} \item fix for handling sparse matrices by \code{dot()} \item fixes for interactions between sparse and dense matrices } \item Now throws compiler error if \code{Rcpp.h} is included before \code{RcppArmadillo.h} (as the 
former is included automatically by the latter anyway, but template logic prefers this ordering). } } \section{Changes in RcppArmadillo version 0.3.4.3 (2012-10-04)}{ \itemize{ \item Upgraded to Armadillo release 3.4.3 \itemize{ \item fix for aliasing issue in \code{diagmat()} \item fix for \code{speye()} signature } } } \section{Changes in RcppArmadillo version 0.3.4.2 (2012-09-25)}{ \itemize{ \item Upgraded to Armadillo release 3.4.2 \itemize{ \item minor fixes for handling sparse submatrix views \item minor speedups for sparse matrices } } } \section{Changes in RcppArmadillo version 0.3.4.1 (2012-09-18)}{ \itemize{ \item Upgraded to Armadillo release 3.4.1 \itemize{ \item workaround for a bug in the Mac OS X accelerate framework \item fixes for handling empty sparse matrices \item added documentation for saving & loading matrices in HDF5 format \item faster dot() and cdot() for complex numbers } } } \section{Changes in RcppArmadillo version 0.3.4.0 (2012-09-06)}{ \itemize{ \item Upgraded to Armadillo release 3.4.0 (Ku De Ta) \itemize{ \item added economical QR decomposition: qr_econ() \item added .each_col() & .each_row() for vector operations repeated on each column or row \item added preliminary support for sparse matrices, contributed by Ryan Curtin et al. 
(Georgia Institute of Technology) \item faster singular value decomposition via divide-and-conquer algorithm \item faster .randn() } \item NEWS file converted to Rd format } } \section{Changes in RcppArmadillo version 0.3.3.91 (2012-08-30)}{ \itemize{ \item Upgraded to Armadillo release 3.3.91 \itemize{ \item faster singular value decomposition via "divide and conquer" algorithm \item added economical QR decomposition: qr_econ() \item added .each_col() & .each_row() for vector operations repeated on each column or row \item added preliminary support for sparse matrices, contributed by Ryan Curtin, James Cline and Matthew Amidon (Georgia Institute of Technology) } \item Corrected summary method to deal with the no intercept case when using a formula; also display residual summary() statistics \item Expanded unit tests for fastLm } } \section{Changes in RcppArmadillo version 0.3.2.4 (2012-07-11)}{ \itemize{ \item Upgraded to Armadillo release 3.2.4 \itemize{ \item workaround for a regression (bug) in GCC 4.7.0 and 4.7.1 } } } \section{Changes in RcppArmadillo version 0.3.2.3 (2012-07-01)}{ \itemize{ \item Upgraded to Armadillo release 3.2.3 \itemize{ \item minor correction for declaration of fixed size vectors and matrices \item Reverted three header files \{Mat,Row,Col\}_bones.hpp back to previous release due to compilation failures under g++-4.7 \item Added new vignette 'RcppArmadillo-intro' based on a just-submitted introductory paper (by Eddelbuettel and Sanderson) about RcppArmadillo \item Change from release 3.2.2 which we skipped as it did not really affect builds under R: \itemize{ \item minor fix for compiling without debugging enabled (aka release mode) \item better detection of ATLAS during installation on Fedora and Red Hat systems } \item Small enhancement to fastLm } } } \section{Changes in RcppArmadillo version 0.3.2.0 (2012-05-21)}{ \itemize{ \item Upgraded to Armadillo release 3.2.0 "Creamfields" \itemize{ \item faster eigen decomposition via "divide 
and conquer" algorithm \item faster transpose of vectors and compound expressions \item faster handling of diagonal views \item faster handling of tiny fixed size vectors (≤ 4 elements) \item added unique(), for finding unique elements of a matrix } } } \section{Changes in RcppArmadillo version 0.3.1.94 (2012-05-15)}{ \itemize{ \item Upgraded to Armadillo release 3.1.94 "v3.2 beta 2" \itemize{ \item added unique(), for finding unique elements of a matrix \item faster eigen decomposition via "divide and conquer" algorithm \item faster transpose of vectors and compound expressions \item faster handling of tiny fixed size vectors (≤ 4 elements) } } } \section{Changes in RcppArmadillo version 0.3.1.92 (2012-05-10)}{ \itemize{ \item Upgraded to Armadillo release 3.1.92 "v3.2 beta 2" \itemize{ \item added unique(), for finding unique elements of a matrix \item faster eigen decomposition via optional use of "divide and conquer" by eig_sym() \item faster transpose of vectors and compound expressions } } } \section{Changes in RcppArmadillo version 0.3.0.3 (2012-05-03)}{ \itemize{ \item Upgraded to Armadillo release 3.0.3 \itemize{ \item fixes for inplace transpose of complex number matrices \item fixes for complex number version of svd_econ() \item fixes for potential aliasing issues with submatrix views } \item New example script fastLm } } \section{Changes in RcppArmadillo version 0.3.0.2 (2012-04-19)}{ \itemize{ \item Upgraded to Armadillo release 3.0.2 \itemize{ \item fixes for handling diagonal matrices } \item Undefine NDEBUG if it has been set (as R does) as this prevents a number of useful debugging checks. 
Users can still define it or define ARMA_NO_DEBUG if they want a 'non-development' build } } \section{Changes in RcppArmadillo version 0.3.0.1 (2012-04-12)}{ \itemize{ \item Upgraded to Armadillo release 3.0.1 \itemize{ \item fixes for compilation errors \item fixes for potential aliasing issues } } } \section{Changes in RcppArmadillo version 0.3.0 (2012-04-10)}{ \itemize{ \item Upgraded to Armadillo release 3.0.0 "Antarctic Chilli Ranch" \itemize{ \item added non-contiguous submatrix views \item added shorthand for inverse: .i() \item added hist() and histc() \item faster repmat() \item faster handling of submatrix views with one row or column \item faster generation of random numbers \item faster element access in fixed size matrices \item better detection of vector expressions by sum(), cumsum(), prod(), min(), max(), mean(), median(), stddev(), var() \item expressions X=A.i()*B and X=inv(A)*B are automatically converted to X=solve(A,B) } } } \section{Changes in RcppArmadillo version 0.2.40 (2012-04-04)}{ \itemize{ \item Upgraded to Armadillo release 2.99.4 "Antarctic Chilli Ranch (Beta 4)" \itemize{ \item fixes for handling expressions with fixed size matrices } } } \section{Changes in RcppArmadillo version 0.2.39 (2012-04-02)}{ \itemize{ \item Upgraded to Armadillo release 2.99.3 "Antarctic Chilli Ranch (Beta 3)" \itemize{ \item faster repmat() \item workarounds for braindead compilers (eg. 
Visual Studio) } } } \section{Changes in RcppArmadillo version 0.2.38 (2012-03-28)}{ \itemize{ \item Upgraded to Armadillo release 2.99.2 "Antarctic Chilli Ranch (Beta 2)" \itemize{ \item added .i() \item much faster handling of .col() and .row() \item expressions X=A.i()*B and X=inv(A)*B are automatically converted to X=solve(A,B) } } } \section{Changes in RcppArmadillo version 0.2.37 (2012-03-19)}{ \itemize{ \item Upgraded to Armadillo release 2.99.1 "Antarctic Chilli Ranch (Beta 1)" \itemize{ \item added non-contiguous submatrix views \item added hist() and histc() \item faster handling of submatrix views \item faster generation of random numbers \item faster element access in fixed size matrices \item better detection of vector expressions by sum(), cumsum(), prod(), min(), max(), mean(), median(), stddev(), var() } } } \section{Changes in RcppArmadillo version 0.2.36 (2012-03-05)}{ \itemize{ \item Upgraded to Armadillo release 2.4.4 \itemize{ \item fixes for qr() and syl() \item more portable wall_clock class \item faster relational operators on submatrices } } } \section{Changes in RcppArmadillo version 0.2.35 (2012-02-17)}{ \itemize{ \item Upgraded to Armadillo release 2.4.3 \itemize{ \item Support for ARMA_DEFAULT_OSTREAM using Rcpp::Rcout added \item Minor bug fix release improving corner cases affecting builds: \itemize{ \item Missing semicolon added in Mat_meat (when in C++0x mode), with thanks to Teo Guo Ci \item Armadillo version vars now instantiated in RcppArmadillo.cpp which helps older g++ versions, with thanks to Gershon Bialer \item Thanks also to Martin Renner for testing these changes \item Unit tests output fallback directory changed per Brian Ripley's request to not ever use /tmp \item Minor update to version numbers in RcppArmadillo-package.Rd } } } } \section{Changes in RcppArmadillo version 0.2.34 (2011-12-12)}{ \itemize{ \item Upgraded to Armadillo release 2.4.2 \itemize{ \item clarified documentation for .reshape() \item fix for handling 
of empty matrices by .resize() } } } \section{Changes in RcppArmadillo version 0.2.33 (2011-12-07)}{ \itemize{ \item Upgraded to Armadillo release 2.4.1 \itemize{ \item added .resize() \item fix for vector initialisation } } } \section{Changes in RcppArmadillo version 0.2.32 (2011-12-04)}{ \itemize{ \item Upgraded to Armadillo test release 2.4.0 "Loco Lounge Lizard" \item Minimal changes relative to 0.2.31 based on 2.3.92, next section is relative to the previous stable release series 2.2.* of Armadillo \itemize{ \item added shorter forms of transposes: .t() and .st() \item added optional use of 64 bit indices, allowing matrices to have more than 4 billion elements \item added experimental support for C++11 initialiser lists \item faster pinv() \item faster inplace transpose \item faster handling of expressions with diagonal views \item fixes for handling expressions with aliasing and submatrices \item fixes for linking on Ubuntu and Debian systems \item fixes for inconsistencies in interactions between matrices and cubes \item refactored code to eliminate warnings when using the Clang C++ compiler \item .print_trans() and .raw_print_trans() are deprecated } } } \section{Changes in RcppArmadillo version 0.2.31 (2011-11-28)}{ \itemize{ \item Upgraded to Armadillo test release 2.3.92 "Loco Lounge Lizard (Beta 2)" \itemize{ \item fixes for linking on Ubuntu and Debian systems \item fixes for inconsistencies in interactions between matrices and cubes } } } \section{Changes in RcppArmadillo version 0.2.30 (2011-11-19)}{ \itemize{ \item Upgraded to Armadillo test release 2.3.91 "Loco Lounge Lizard (Beta 1)" \itemize{ \item added shorter forms of transposes: .t() and .st() \item added optional use of 64 bit indices, allowing matrices to have more than 4 billion elements \item added experimental support for C++11 initialiser lists \item faster pinv() \item faster inplace transpose \item bugfixes for handling expressions with aliasing and submatrices \item refactored code 
to eliminate warnings when using the Clang C++ compiler \item .print_trans() and .raw_print_trans() are deprecated } } } \section{Changes in RcppArmadillo version 0.2.29 (2011-09-01)}{ \itemize{ \item Upgraded to Armadillo release 2.2.3 \itemize{ \item Release fixes a speed issue in the as_scalar() function. } } } \section{Changes in RcppArmadillo version 0.2.28 (2011-08-02)}{ \itemize{ \item Upgraded to Armadillo release 2.2.1 "Blue Skies Debauchery" \itemize{ \item faster multiplication of small matrices \item faster trans() \item faster handling of submatrices by norm() \item added economical singular value decomposition: svd_thin() \item added circ_toeplitz() \item added .is_colvec() & .is_rowvec() \item fixes for handling of complex numbers by cov(), cor(), running_stat_vec } } } \section{Changes in RcppArmadillo version 0.2.27 (2011-07-22)}{ \itemize{ \item Upgraded to Armadillo release 2.1.91 "v2.2 beta 1" \itemize{ \item faster multiplication of small matrices \item faster trans() \item faster handling of submatrices by norm() \item added economical singular value decomposition: svd_thin() \item added circ_toeplitz() \item added .is_colvec() & .is_rowvec() } } } \section{Changes in RcppArmadillo version 0.2.26 (2011-07-17)}{ \itemize{ \item Upgraded to Armadillo release 2.0.2 \itemize{ \item fix for handling of conjugate transpose by as_scalar() \item fix for handling of aliasing by diagmat() \item fix for handling of empty matrices by symmatu()/symmatl() } } } \section{Changes in RcppArmadillo version 0.2.25 (2011-06-30)}{ \itemize{ \item Upgraded to Armadillo 2.0.1 which fixes two minor compilation issues } } \section{Changes in RcppArmadillo version 0.2.24 (2011-06-29)}{ \itemize{ \item Upgraded to Armadillo release 2.0.0 "Carnivorous Sugar Glider" \itemize{ \item faster multiplication of tiny matrices (≤ 4x4) \item faster compound expressions containing submatrices \item faster inverse of symmetric positive definite matrices \item faster element access 
for fixed size matrices \item added handling of arbitrarily sized empty matrices (eg. 5x0) \item added loading & saving of matrices as CSV text files \item added .count() member function to running_stat and running_stat_vec \item added syl(), strans(), symmatu()/symmatl() \item added submatrices of submatrices \item det(), inv() and solve() can be forced to use more precise \item algorithms for tiny matrices (≤ 4x4) \item htrans() has been deprecated; use trans() instead \item API change: trans() now takes the complex conjugate when transposing a complex matrix \item API change: .is_vec() now outputs true for empty vectors (eg. 0x1) \item API change: forms of chol(), eig_sym(), eig_gen(), inv(), lu(), pinv(), princomp(), qr(), solve(), svd(), syl() that do not return a bool indicating success now throw std::runtime_error exceptions when failures are detected \item API change: princomp_cov() has been removed; princomp() in conjunction with cov() can be used instead \item API change: set_log_stream() & get_log_stream() have been replaced by set_stream_err1() & get_stream_err1() } } } \section{Changes in RcppArmadillo version 0.2.23 (2011-06-23)}{ \itemize{ \item Upgraded to Armadillo release 1.99.5 "v2.0 beta 5" \itemize{ \item Forms of chol(), eig_sym(), eig_gen(), inv(), lu(), pinv(), princomp(), qr(), solve(), svd(), syl() that do not return a bool indicating success now throw std::runtime_error exceptions when failures are detected \item princomp_cov() has been removed; princomp() in conjunction with cov() can be used instead \item set_log_stream() & get_log_stream() have been replaced by set_stream_err1() & get_stream_err1() \item det(), inv() and solve() can be forced to use more precise algorithms for tiny matrices (≤ 4x4) \item Added loading & saving of matrices as CSV text files } \item fastLmPure() now uses same argument order as R's lm.fit() \item Export and document S3 methods in NAMESPACE and manual page as such } } \section{Changes in RcppArmadillo 
version 0.2.22 (2011-06-06)}{ \itemize{ \item Upgraded to Armadillo release 1.99.4 "v2.0 beta 4" \itemize{ \item fixes for handling of tiny matrices } } } \section{Changes in RcppArmadillo version 0.2.21 (2011-05-27)}{ \itemize{ \item Upgraded to Armadillo release 1.99.3 "v2.0 beta 3" \itemize{ \item stricter size checking for row and column vectors \item added .count() member function to running_stat and running_stat_vec } } } \section{Changes in RcppArmadillo version 0.2.20 (2011-05-25)}{ \itemize{ \item Upgraded to Armadillo release 1.99.2 "v2.0 beta 2" (and 1.99.1 before) \itemize{ \item faster inverse of symmetric matrices \item faster element access for fixed size matrices \item faster multiplication of tiny matrices (eg. 4x4) \item faster compund expressions containing submatrices \item added handling of arbitrarily sized empty matrices (eg. 5x0) \item added syl() \item added strans() \item added symmatu()/symmatl() \item added submatrices of submatrices \item htrans() has been deprecated; use trans() instead \item trans() now takes the complex conjugate when transposing a complex matrix \item .is_vec() now outputs true for empty matrices \item most functions with matrix inputs no longer throw exceptions when given empty matrices (eg. 
5x0) } \item Added a new subdirectory examples/ seeded with a nice Vector Autoregression simulation simulation example by Lance Bachmeier \item Rewrote armadillo_version as to no longer require an instance of arma::arma_version, with tanks to Conrad for the suggestion } } \section{Changes in RcppArmadillo version 0.2.19 (2011-04-18)}{ \itemize{ \item Upgraded to Armadillo version 1.2.0 "Unscrupulous Carbon Emitter" \itemize{ \item Added ability to use Blas & Lapack libraries with capitalised function names \item Reduction of pedantic compiler warnings } } } \section{Changes in RcppArmadillo version 0.2.18 (2011-04-03)}{ \itemize{ \item Upgraded to Armadillo version 1.1.92 "Jurassic Barbecue" \itemize{ \item Bugfix in cor() \item Automatic installation now requires CMake >= 2.6 } } } \section{Changes in RcppArmadillo version 0.2.17 (2011-03-22)}{ \itemize{ \item Upgraded to Armadillo version 1.1.90 "Inside Job" \itemize{ \item Added .min() & .max(), which can provide the extremum's location \item More robust mean(), var(), stddev() } } } \section{Changes in RcppArmadillo version 0.2.16 (2011-03-10)}{ \itemize{ \item Upgraded to Armadillo version 1.1.8 "Kangaroo Steak" \itemize{ \item Added floor() and ceil() \item Added “not a number”: math::nan() \item Added infinity: math::inf() \item Added standalone is_finite() \item Faster min(), max(), mean() \item Bugfix for a corner case with NaNs in min() and max() } } } \section{Changes in RcppArmadillo version 0.2.15 (2011-03-04)}{ \itemize{ \item Upgraded to Armadillo version 1.1.6 “Baby Carpet Shark” \itemize{ \item fixed size matrices and vectors can use auxiliary (external) memory \item .in_range() can use span() arguments \item subfields can use span() arguments } } } \section{Changes in RcppArmadillo version 0.2.14 (2011-03-02)}{ \itemize{ \item Support Run-Time Type Information (RTTI) on matrices by setting the state variable vec_state in Row and Col instantiation, with thanks to Conrad Sanderson for the hint \item 
fastLm code simplified further by instantiating the Armadillo matrix and vector directly from the SEXP coming from R \item inst/doc/Makefile now respects $R_HOME environment variable } } \section{Changes in RcppArmadillo version 0.2.13 (2011-02-18)}{ \itemize{ \item Upgraded to Armadillo version 1.1.4 “Manta Lodge” \itemize{ \item Faster sort() \item Updated installation to detect recent versions of Intel's MKL \item Added interpretation of arbitrary "flat" subcubes as matrices } } } \section{Changes in RcppArmadillo version 0.2.12 (2011-02-15)}{ \itemize{ \item Upgraded to Armadillo version 1.1.2 “Flood Kayak” \itemize{ \item Faster prod() \item Faster solve() for compound expressions \item Fix for compilation using GCC's C++0x mode \item Fix for matrix handling by subcubes } } } \section{Changes in RcppArmadillo version 0.2.11 (2011-01-06)}{ \itemize{ \item Upgraded to Armadillo version 1.1.0 “Climate Vandal” \itemize{ \item Extended submatrix views, including access to elements whose indices are specified in a separate vector \item Added handling of raw binary files by save/load functions \item Added cumsum() \item Added interpretation of matrices as triangular via trimatu()/trimatl() \item Faster solve(), inv() via explicit handling of triangular matrices \item The stream for logging of errors and warnings can now be changed } \item New unexported R function SHLIB, a small wrapper around R CMD SHLIB, which can be used as Rscript -e "RcppArmadillo:::SHLIB('foo.cpp')" } } \section{Changes in RcppArmadillo version 0.2.10 (2010-11-25)}{ \itemize{ \item Upgraded to Armadillo 1.0.0 "Antipodean Antileech" \itemize{ \item After 2 1/2 years of collaborative development, we are proud to release the 1.0 milestone version. \item Many thanks are extended to all contributors and bug reporters. 
} \item R/RcppArmadillo.package.skeleton.R: Updated to no longer rely on GNU make for builds of packages using RcppArmadillo \item summary() for fastLm() objects now returns r.squared and adj.r.squared } } \section{Changes in RcppArmadillo version 0.2.9 (2010-11-11)}{ \itemize{ \item Upgraded to Armadillo 0.9.92 "Wall Street Gangster": \itemize{ \item Fixes for compilation issues under the Intel C++ compiler \item Added matrix norms } } } \section{Changes in RcppArmadillo version 0.2.8 (2010-10-16)}{ \itemize{ \item Upgraded to Armadillo 0.9.90 "Water Dragon": \itemize{ \item Added unsafe_col() \item Speedups and bugfixes in lu() \item Minimisation of pedantic compiler warnings } \item Switched NEWS and ChangeLog between inst/ and the top-level directory so that NEWS (this file) gets installed with the package } } \section{Changes in RcppArmadillo version 0.2.7 (2010-09-25)}{ \itemize{ \item Upgraded to Armadillo 0.9.80 "Chihuahua Muncher": \itemize{ \item Added join_slices(), insert_slices(), shed_slices() \item Added in-place operations on diagonals \item Various speedups due to internal architecture improvements } } } \section{Changes in RcppArmadillo version 0.2.6 (2010-09-12)}{ \itemize{ \item Upgraded to Armadillo 0.9.70 "Subtropical Winter Safari" \item arma::Mat, arma::Row and arma::Col get constructor that take vector or matrix sugar expressions. See the unit test "test.armadillo.sugar.ctor" and "test.armadillo.sugar.matrix.ctor" for examples. 
} } \section{Changes in RcppArmadillo version 0.2.5 (2010-08-05)}{ \itemize{ \item Upgraded to Armadillo 0.9.60 "Killer Bush Turkey" } } \section{Changes in RcppArmadillo version 0.2.4 (2010-07-27)}{ \itemize{ \item Upgraded to Armadillo 0.9.52 'Monkey Wrench' \item src/fastLm.cpp: Switch from inv() to pinv() as inv() now tests for singular matrices and warns and returns an empty matrix which stops the example fastLm() implementation on the manual page -- and while this is generally reasonably it makes sense here to continue which the Moore-Penrose pseudo-inverse allows us to do this } } \section{Changes in RcppArmadillo version 0.2.3 (2010-06-14)}{ \itemize{ \item Better configuration to detect suncc (which does not have std::isfinite) } } \section{Changes in RcppArmadillo version 0.2.2 (2010-06-09)}{ \itemize{ \item Added RcppArmadillo:::CxxFlags for cases where RcppArmadillo is not used via a package \item Upgraded to Armadillo 0.9.10 'Chilli Espresso' \item Wrap support for mtOp, i.e. operations involving mixed types such as a complex and an arma::mat, which have been introduced in armadillo 0.9.10 \item Wrap support for mtGlue, i.e. operations involving matrices of mixed types such as an arma::mat and an arma::imat, which have been introduced in armadillo 0.9.10 \item Included an inline plugin to support the plugin system introduced in inline 0.3.5. The unit tests have moved from the src directory to the unit test directory (similar to Rcpp) using cxxfunction with the RcppArmadillo plugin. 
} } \section{Changes in RcppArmadillo version 0.2.1 (2010-05-19)}{ \itemize{ \item Bug-fix release permitting compilation on Windows } } \section{Changes in RcppArmadillo version 0.2.0 (2010-05-18)}{ \itemize{ \item fastLm() is now generic and has a formula interface as well as methods for print, summary, predict to behave like a standard model fitting function \item Armadillo sources (using release 0.9.8) are now included in the package using a standardized build suitable for our purposes (not assuming Boost or Atlas) -- see ?RcppArmadillo for details \item New R function RcppArmadillo.package.skeleton, similar to Rcpp::Rcpp.package.skeleton, but targetting use of RcppArmadillo } } \section{Changes in RcppArmadillo version 0.1.0 (2010-03-11)}{ \itemize{ \item the fastLm() implementation of a bare-bones lm() fit (using Armadillo's solve() function) provides an example of how efficient code can be written compactly using the combination of Rcpp, RcppAramadillo and Armadillo \item support for Rcpp implicit wrap of these types : Mat<T>, Col<T>, Row<T>, Cube<T> where T is one of : int, unsigned int, double, float \item support for Rcpp implicit as of these types : Mat<T>, Col<T>, Row<T> where R is one of : int, unsigned int, double, float } }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/inline_text.R \name{inline_text.tbl_summary} \alias{inline_text.tbl_summary} \title{Report statistics from summary tables inline} \usage{ \method{inline_text}{tbl_summary}(x, variable, level = NULL, column = ifelse(is.null(x$by), "stat_0", stop("Must specify column")), pvalue_fun = function(x) style_pvalue(x, prepend_p = TRUE), ...) } \arguments{ \item{x}{object created from \link{tbl_summary}} \item{variable}{variable name of statistic to present} \item{level}{level of the variable to display for categorical variables. Can also specify the 'Unknown' row. Default is \code{NULL}} \item{column}{column name to return from \code{x$table_body}. Can also pass the level of a by variable.} \item{pvalue_fun}{function to round and format p-values. Default is \code{\link{style_pvalue}}. The function must have a numeric vector input (the numeric, exact p-value), and return a string that is the rounded/formatted p-value (e.g. \code{pvalue_fun = function(x) style_pvalue(x, digits = 2)} or equivalently, \code{purrr::partial(style_pvalue, digits = 2)}).} \item{...}{not used} } \value{ A string reporting results from a gtsummary table } \description{ Extracts and returns statistics from a \code{tbl_summary} object for inline reporting in an R markdown document. 
Detailed examples in the \href{http://www.danieldsjoberg.com/gtsummary/articles/tbl_summary.html#inline_text}{tbl_summary vignette} } \examples{ t1 <- tbl_summary(trial) t2 <- tbl_summary(trial, by = trt) \%>\% add_p() inline_text(t1, variable = "age") inline_text(t2, variable = "grade", level = "I", column = "Drug") inline_text(t2, variable = "grade", column = "p.value") } \seealso{ Other tbl_summary tools: \code{\link{add_n}}, \code{\link{add_overall}}, \code{\link{add_p}}, \code{\link{add_q.tbl_summary}}, \code{\link{add_stat_label}}, \code{\link{bold_italicize_labels_levels}}, \code{\link{bold_p.tbl_summary}}, \code{\link{modify_header}}, \code{\link{sort_p.tbl_summary}}, \code{\link{tbl_summary}} } \author{ Daniel D. Sjoberg } \concept{tbl_summary tools}
/man/inline_text.tbl_summary.Rd
permissive
yushu-liu/gtsummary
R
false
true
2,141
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/inline_text.R \name{inline_text.tbl_summary} \alias{inline_text.tbl_summary} \title{Report statistics from summary tables inline} \usage{ \method{inline_text}{tbl_summary}(x, variable, level = NULL, column = ifelse(is.null(x$by), "stat_0", stop("Must specify column")), pvalue_fun = function(x) style_pvalue(x, prepend_p = TRUE), ...) } \arguments{ \item{x}{object created from \link{tbl_summary}} \item{variable}{variable name of statistic to present} \item{level}{level of the variable to display for categorical variables. Can also specify the 'Unknown' row. Default is \code{NULL}} \item{column}{column name to return from \code{x$table_body}. Can also pass the level of a by variable.} \item{pvalue_fun}{function to round and format p-values. Default is \code{\link{style_pvalue}}. The function must have a numeric vector input (the numeric, exact p-value), and return a string that is the rounded/formatted p-value (e.g. \code{pvalue_fun = function(x) style_pvalue(x, digits = 2)} or equivalently, \code{purrr::partial(style_pvalue, digits = 2)}).} \item{...}{not used} } \value{ A string reporting results from a gtsummary table } \description{ Extracts and returns statistics from a \code{tbl_summary} object for inline reporting in an R markdown document. 
Detailed examples in the \href{http://www.danieldsjoberg.com/gtsummary/articles/tbl_summary.html#inline_text}{tbl_summary vignette} } \examples{ t1 <- tbl_summary(trial) t2 <- tbl_summary(trial, by = trt) \%>\% add_p() inline_text(t1, variable = "age") inline_text(t2, variable = "grade", level = "I", column = "Drug") inline_text(t2, variable = "grade", column = "p.value") } \seealso{ Other tbl_summary tools: \code{\link{add_n}}, \code{\link{add_overall}}, \code{\link{add_p}}, \code{\link{add_q.tbl_summary}}, \code{\link{add_stat_label}}, \code{\link{bold_italicize_labels_levels}}, \code{\link{bold_p.tbl_summary}}, \code{\link{modify_header}}, \code{\link{sort_p.tbl_summary}}, \code{\link{tbl_summary}} } \author{ Daniel D. Sjoberg } \concept{tbl_summary tools}
# Map a user-supplied 'subset' specification (start/end time stamps) onto
# index positions within 'timestamp'.
#
# Args:
#   timestamp: POSIXct vector of sample time stamps (assumed ordered, no NAs).
#   subset:    length-2 vector giving start and end time stamp, each in
#              "%Y-%m-%d %H:%M:%S" or "%Y-%m-%d" format; an NA element means
#              "use the first/last available time stamp".
#
# Returns: cbind(start, end) -- the indices in 'timestamp' closest in time to
#   the requested start and end (a tie on distance yields multiple indices).
subset.int <- function(timestamp, subset) {
### internal function for data subsets
	num.samples <- length(timestamp)
	tz <- attr(timestamp, "tzone")
	if(is.null(tz)) tz <- ""

	# 'subset' must be a length-2 vector that is character or carries NA
	# placeholders (NA = "use the boundary of the full period")
	if((!any(is.character(subset)) && !any(is.na(subset))) || length(subset)!=2) stop("Please specify 'subset' as vector of start and end time stamp")
	if(is.na(subset[1])) subset[1] <- as.character(timestamp[1])
	if(is.na(subset[2])) subset[2] <- as.character(timestamp[num.samples])

	# date-only input ("YYYY-MM-DD", 10 chars) is expanded to midnight
	if(nchar(subset[1])==10) subset[1] <- paste(subset[1], "00:00:00")
	if(nchar(subset[2])==10) subset[2] <- paste(subset[2], "00:00:00")

	start <- strptime(subset[1], "%Y-%m-%d %H:%M:%S", tz[1])
	end <- strptime(subset[2], "%Y-%m-%d %H:%M:%S", tz[1])
	if(is.na(start)) stop("'start' time stamp in 'subset' not correctly formatted")
	if(is.na(end)) stop("'end' time stamp in 'subset' not correctly formatted")
	if(start<timestamp[1] || start>timestamp[num.samples]) stop("'start' time stamp in 'subset' not in period")
	if(end<timestamp[1] || end>timestamp[num.samples]) stop("'end' time stamp in 'subset' not in period")

	# index (or indices, on a tie) of the sample(s) closest in time to 'target';
	# replaces the former double difftime against ISOdatetime(1,1,1,0,0,0),
	# which is numerically identical to a direct difference
	nearest.index <- function(target) {
		dist <- abs(as.numeric(difftime(timestamp, target, units="days")))
		which(dist == min(dist))
	}

	start <- nearest.index(start)
	end <- nearest.index(end)
	return(cbind(start, end))
}
/R/subset.int.R
no_license
paulponcet/bReeze
R
false
false
1,577
r
# Resolve a start/end 'subset' request to the pair of sample indices in
# 'timestamp' that lie nearest to the requested instants. NA entries in
# 'subset' default to the period boundaries; bare dates are taken at midnight.
subset.int <- function(timestamp, subset) {
	### internal function for data subsets
	n <- length(timestamp)
	zone <- attr(timestamp, "tzone")
	if(is.null(zone)) {
		zone <- ""
	}

	bad.spec <- (!any(is.character(subset)) && !any(is.na(subset))) || length(subset)!=2
	if(bad.spec) stop("Please specify 'subset' as vector of start and end time stamp")

	# fill NA placeholders with the period boundaries, then expand
	# date-only entries ("YYYY-MM-DD") to midnight
	defaults <- as.character(timestamp[c(1, n)])
	for(k in 1:2) {
		if(is.na(subset[k])) subset[k] <- defaults[k]
		if(nchar(subset[k])==10) subset[k] <- paste(subset[k], "00:00:00")
	}

	parsed <- lapply(subset, strptime, format="%Y-%m-%d %H:%M:%S", tz=zone[1])
	if(is.na(parsed[[1]])) stop("'start' time stamp in 'subset' not correctly formated")
	if(is.na(parsed[[2]])) stop("'end' time stamp in 'subset' not correctly formated")
	if(parsed[[1]]<timestamp[1] || parsed[[1]]>timestamp[n]) stop("'start' time stamp in 'subset' not in period")
	if(parsed[[2]]<timestamp[1] || parsed[[2]]>timestamp[n]) stop("'end' time stamp in 'subset' not in period")

	# indices of the sample(s) at minimal absolute time distance from 'stamp'
	origin <- ISOdatetime(1, 1, 1, 0, 0, 0)
	closest <- function(stamp) {
		offset <- difftime(timestamp, origin, tz=zone[1], units="days") -
			difftime(stamp, origin, tz=zone[1], units="days")
		dd <- abs(as.numeric(offset))
		which(dd == min(dd))
	}

	start <- closest(parsed[[1]])
	end <- closest(parsed[[2]])
	return(cbind(start, end))
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ecs_operations.R \name{ecs_list_task_definition_families} \alias{ecs_list_task_definition_families} \title{Returns a list of task definition families that are registered to your account (which may include task definition families that no longer have any ACTIVE task definition revisions)} \usage{ ecs_list_task_definition_families(familyPrefix, status, nextToken, maxResults) } \arguments{ \item{familyPrefix}{The \code{familyPrefix} is a string that is used to filter the results of \code{ListTaskDefinitionFamilies}. If you specify a \code{familyPrefix}, only task definition family names that begin with the \code{familyPrefix} string are returned.} \item{status}{The task definition family status with which to filter the \code{ListTaskDefinitionFamilies} results. By default, both \code{ACTIVE} and \code{INACTIVE} task definition families are listed. If this parameter is set to \code{ACTIVE}, only task definition families that have an \code{ACTIVE} task definition revision are returned. If this parameter is set to \code{INACTIVE}, only task definition families that do not have any \code{ACTIVE} task definition revisions are returned. If you paginate the resulting output, be sure to keep the \code{status} value constant in each subsequent request.} \item{nextToken}{The \code{nextToken} value returned from a \code{ListTaskDefinitionFamilies} request indicating that more results are available to fulfill the request and further calls will be needed. If \code{maxResults} was provided, it is possible the number of results to be fewer than \code{maxResults}. This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.} \item{maxResults}{The maximum number of task definition family results returned by \code{ListTaskDefinitionFamilies} in paginated output. 
When this parameter is used, \code{ListTaskDefinitions} only returns \code{maxResults} results in a single page along with a \code{nextToken} response element. The remaining results of the initial request can be seen by sending another \code{ListTaskDefinitionFamilies} request with the returned \code{nextToken} value. This value can be between 1 and 100. If this parameter is not used, then \code{ListTaskDefinitionFamilies} returns up to 100 results and a \code{nextToken} value if applicable.} } \description{ Returns a list of task definition families that are registered to your account (which may include task definition families that no longer have any \code{ACTIVE} task definition revisions). } \details{ You can filter out task definition families that do not contain any \code{ACTIVE} task definition revisions by setting the \code{status} parameter to \code{ACTIVE}. You can also filter the results with the \code{familyPrefix} parameter. } \section{Request syntax}{ \preformatted{svc$list_task_definition_families( familyPrefix = "string", status = "ACTIVE"|"INACTIVE"|"ALL", nextToken = "string", maxResults = 123 ) } } \examples{ \dontrun{ # This example lists all of your registered task definition families. svc$list_task_definition_families() # This example lists the task definition revisions that start with "hpcc". svc$list_task_definition_families( familyPrefix = "hpcc" ) } } \keyword{internal}
/paws/man/ecs_list_task_definition_families.Rd
permissive
johnnytommy/paws
R
false
true
3,376
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ecs_operations.R \name{ecs_list_task_definition_families} \alias{ecs_list_task_definition_families} \title{Returns a list of task definition families that are registered to your account (which may include task definition families that no longer have any ACTIVE task definition revisions)} \usage{ ecs_list_task_definition_families(familyPrefix, status, nextToken, maxResults) } \arguments{ \item{familyPrefix}{The \code{familyPrefix} is a string that is used to filter the results of \code{ListTaskDefinitionFamilies}. If you specify a \code{familyPrefix}, only task definition family names that begin with the \code{familyPrefix} string are returned.} \item{status}{The task definition family status with which to filter the \code{ListTaskDefinitionFamilies} results. By default, both \code{ACTIVE} and \code{INACTIVE} task definition families are listed. If this parameter is set to \code{ACTIVE}, only task definition families that have an \code{ACTIVE} task definition revision are returned. If this parameter is set to \code{INACTIVE}, only task definition families that do not have any \code{ACTIVE} task definition revisions are returned. If you paginate the resulting output, be sure to keep the \code{status} value constant in each subsequent request.} \item{nextToken}{The \code{nextToken} value returned from a \code{ListTaskDefinitionFamilies} request indicating that more results are available to fulfill the request and further calls will be needed. If \code{maxResults} was provided, it is possible the number of results to be fewer than \code{maxResults}. This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.} \item{maxResults}{The maximum number of task definition family results returned by \code{ListTaskDefinitionFamilies} in paginated output. 
When this parameter is used, \code{ListTaskDefinitions} only returns \code{maxResults} results in a single page along with a \code{nextToken} response element. The remaining results of the initial request can be seen by sending another \code{ListTaskDefinitionFamilies} request with the returned \code{nextToken} value. This value can be between 1 and 100. If this parameter is not used, then \code{ListTaskDefinitionFamilies} returns up to 100 results and a \code{nextToken} value if applicable.} } \description{ Returns a list of task definition families that are registered to your account (which may include task definition families that no longer have any \code{ACTIVE} task definition revisions). } \details{ You can filter out task definition families that do not contain any \code{ACTIVE} task definition revisions by setting the \code{status} parameter to \code{ACTIVE}. You can also filter the results with the \code{familyPrefix} parameter. } \section{Request syntax}{ \preformatted{svc$list_task_definition_families( familyPrefix = "string", status = "ACTIVE"|"INACTIVE"|"ALL", nextToken = "string", maxResults = 123 ) } } \examples{ \dontrun{ # This example lists all of your registered task definition families. svc$list_task_definition_families() # This example lists the task definition revisions that start with "hpcc". svc$list_task_definition_families( familyPrefix = "hpcc" ) } } \keyword{internal}
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/FuGePSD.R \name{Rule.clearAntecedent} \alias{Rule.clearAntecedent} \title{Remove completely the antecedent part of a rule.} \usage{ Rule.clearAntecedent(rule) } \arguments{ \item{rule}{The rule from which we want to remove the antecedent.} } \value{ A new rule with an empty antecedent } \description{ Remove completely the antecedent part of a rule. }
/man/Rule.clearAntecedent.Rd
permissive
dejavu2010/SDEFSR
R
false
false
438
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/FuGePSD.R \name{Rule.clearAntecedent} \alias{Rule.clearAntecedent} \title{Remove completely the antecedent part of a rule.} \usage{ Rule.clearAntecedent(rule) } \arguments{ \item{rule}{The rule from which we want to remove the antecedent.} } \value{ A new rule with an empty antecedent } \description{ Remove completely the antecedent part of a rule. }
\name{position_upsideDownsideVarianceRatio} \alias{position_upsideDownsideVarianceRatio} \title{Position Upside/Downside Variance Ratio} \usage{position_upsideDownsideVarianceRatio(portfolio,symbol,thresholdReturn) } \arguments{ \item{portfolio}{Portfolio object created using \link[=portfolio_create]{portfolio_create( )} function} \item{symbol}{Unique identifier of the instrument} \item{thresholdReturn}{Return value to be used as a cut-off point} } \value{ Numeric vector of position upside/downside variance ratio values. } \description{Computes upside to downside variance ratio of a position. } \note{\url{https://www.portfolioeffect.com/docs/glossary/measures/absolute-risk-measures/upside-downside-variance-ratio} } \author{Kostin Andrey <andrey.kostin@portfolioeffect.com>} \seealso{\code{\link{position_upsideVariance}} \code{\link{position_downsideVariance}} } \examples{ \dontrun{ data(aapl.data) data(goog.data) data(spy.data) portfolio<-portfolio_create(priceDataIx=spy.data) portfolio_settings(portfolio,windowLength = '3600s',resultsSamplingInterval='60s') portfolio_addPosition(portfolio,'GOOG',100,priceData=goog.data) portfolio_addPosition(portfolio,'AAPL',300,priceData=aapl.data) portfolio_addPosition(portfolio,'SPY',150,priceData=spy.data) util_plot2d(position_upsideDownsideVarianceRatio(portfolio,'GOOG',0.05)) dateStart = "2014-11-17 09:30:00" dateEnd = "2014-11-17 16:00:00" portfolio<-portfolio_create(dateStart,dateEnd) portfolio_settings(portfolio,portfolioMetricsMode="price",windowLength = '3600s', resultsSamplingInterval='60s') portfolio_addPosition(portfolio,'AAPL',100) portfolio_addPosition(portfolio,'C',300) portfolio_addPosition(portfolio,'GOOG',150) util_plot2d(position_upsideDownsideVarianceRatio(portfolio,'AAPL',0.05)) }} \keyword{PortfolioEffectHFT} \keyword{position_upsideDownsideVarianceRatio}
/PortfolioEffectHFT/man/position_upsideDownsideVarianceRatio.Rd
permissive
githubfun/portfolioeffect-quant-r
R
false
false
1,896
rd
\name{position_upsideDownsideVarianceRatio} \alias{position_upsideDownsideVarianceRatio} \title{Position Upside/Downside Variance Ratio} \usage{position_upsideDownsideVarianceRatio(portfolio,symbol,thresholdReturn) } \arguments{ \item{portfolio}{Portfolio object created using \link[=portfolio_create]{portfolio_create( )} function} \item{symbol}{Unique identifier of the instrument} \item{thresholdReturn}{Return value to be used as a cut-off point} } \value{ Numeric vector of position upside/downside variance ratio values. } \description{Computes upside to downside variance ratio of a position. } \note{\url{https://www.portfolioeffect.com/docs/glossary/measures/absolute-risk-measures/upside-downside-variance-ratio} } \author{Kostin Andrey <andrey.kostin@portfolioeffect.com>} \seealso{\code{\link{position_upsideVariance}} \code{\link{position_downsideVariance}} } \examples{ \dontrun{ data(aapl.data) data(goog.data) data(spy.data) portfolio<-portfolio_create(priceDataIx=spy.data) portfolio_settings(portfolio,windowLength = '3600s',resultsSamplingInterval='60s') portfolio_addPosition(portfolio,'GOOG',100,priceData=goog.data) portfolio_addPosition(portfolio,'AAPL',300,priceData=aapl.data) portfolio_addPosition(portfolio,'SPY',150,priceData=spy.data) util_plot2d(position_upsideDownsideVarianceRatio(portfolio,'GOOG',0.05)) dateStart = "2014-11-17 09:30:00" dateEnd = "2014-11-17 16:00:00" portfolio<-portfolio_create(dateStart,dateEnd) portfolio_settings(portfolio,portfolioMetricsMode="price",windowLength = '3600s', resultsSamplingInterval='60s') portfolio_addPosition(portfolio,'AAPL',100) portfolio_addPosition(portfolio,'C',300) portfolio_addPosition(portfolio,'GOOG',150) util_plot2d(position_upsideDownsideVarianceRatio(portfolio,'AAPL',0.05)) }} \keyword{PortfolioEffectHFT} \keyword{position_upsideDownsideVarianceRatio}
context("ml feature standard scaler") test_that("ft_standard_scaler() default params", { test_requires_latest_spark() sc <- testthat_spark_connection() test_default_args(sc, ft_standard_scaler) }) test_that("ft_standard_scaler() param setting", { test_requires_latest_spark() sc <- testthat_spark_connection() test_args <- list( input_col = "foo", output_col = "bar", with_mean = TRUE, with_std = TRUE ) test_param_setting(sc, ft_standard_scaler, test_args) }) test_that("ft_standard_scaler() works properly", { sc <- testthat_spark_connection() sample_data_path <- get_sample_data_path("sample_libsvm_data.txt") sample_data <- spark_read_libsvm(sc, "sample_data", sample_data_path, overwrite = TRUE) scaler <- ft_standard_scaler( sc, input_col = "features", output_col = "scaledFeatures", with_std = TRUE, with_mean = FALSE, uid = "standard_scalaer_999" ) scaler_model <- ml_fit(scaler, sample_data) expect_equal( scaler_model %>% ml_transform(sample_data) %>% head(1) %>% dplyr::pull(scaledFeatures) %>% unlist() %>% sum(), 295.3425, tolerance = 0.001 ) expect_output_file( print(scaler_model), output_file("print/standard-scaler-model.txt") ) })
/tests/testthat/test-ml-feature-standard-scaler.R
permissive
tnixon/sparklyr
R
false
false
1,296
r
context("ml feature standard scaler") test_that("ft_standard_scaler() default params", { test_requires_latest_spark() sc <- testthat_spark_connection() test_default_args(sc, ft_standard_scaler) }) test_that("ft_standard_scaler() param setting", { test_requires_latest_spark() sc <- testthat_spark_connection() test_args <- list( input_col = "foo", output_col = "bar", with_mean = TRUE, with_std = TRUE ) test_param_setting(sc, ft_standard_scaler, test_args) }) test_that("ft_standard_scaler() works properly", { sc <- testthat_spark_connection() sample_data_path <- get_sample_data_path("sample_libsvm_data.txt") sample_data <- spark_read_libsvm(sc, "sample_data", sample_data_path, overwrite = TRUE) scaler <- ft_standard_scaler( sc, input_col = "features", output_col = "scaledFeatures", with_std = TRUE, with_mean = FALSE, uid = "standard_scalaer_999" ) scaler_model <- ml_fit(scaler, sample_data) expect_equal( scaler_model %>% ml_transform(sample_data) %>% head(1) %>% dplyr::pull(scaledFeatures) %>% unlist() %>% sum(), 295.3425, tolerance = 0.001 ) expect_output_file( print(scaler_model), output_file("print/standard-scaler-model.txt") ) })
#################
# Read every *.bin market-data stream in IBdatafeedShareDir (paired with a
# *.info symbol-list file), memory-map it via mmap, convert it to an xts
# object, and assign the result to <SYM1_SYM2_..._xts> in the global
# environment.
# NOTE(review): IBdatafeedShareDir and IBmakeVarlengthStruct() are expected
# to come from the two source()d files below — confirm both are on the path.
#################
library(mmap)
library(xts)
#library(quantmod) #only needed for plotting
source('IBmakeVarlengthStruct.r')
source('datafeed-config.r')

# Pair up data streams (*.bin) with their symbol descriptions (*.info);
# the counts must agree or the idx-based pairing below is meaningless.
allshares<-dir(IBdatafeedShareDir, pattern=glob2rx('*.bin'))
allinfo<-dir(IBdatafeedShareDir, pattern=glob2rx('*.info'))
stopifnot(length(allshares)==length(allinfo))

#~ Historical fixed-width extractor kept for reference: it hard-coded 20
#~ data columns (two symbols x 10 fields) via cbind(x$BidSize, ..., x$Low2)
#~ indexed by x$timestamp.

# Build an xts from one mapped record batch: element 1 is the timestamp
# index, the remaining list elements become data columns; trailing all-NA
# rows (unwritten tail of the file) are trimmed.
# This function is agnostic to the number of columns in a datarow :))
THEextractFUN<- function(x) na.trim( .xts( do.call(cbind,x[-1]) , x[[1]]), sides='right', is.na='all')

#for (tmpfile in allshares) {
for (idx in 1:length(allshares)) {
streamfile <- allshares[idx]
infofile <- allinfo[idx]
# A stream and its info file must share the same basename.
stopifnot( strsplit(streamfile,'\\.')[[1]][1]==strsplit(infofile,'\\.')[[1]][1] )
# Get the stream's symbols from the .info file text
info <- read.table(paste(IBdatafeedShareDir,'/',infofile,sep=''))
thesymbols <- as.character(info[,1])
numsymbols <- length(thesymbols)
# Record layout: one double timestamp, then 10 doubles per symbol
# (labelled below as BidSize..Low).
atom.lst=c( list(double()) #timestamp
,rep( list(double(), double(), double(), double(), double(), double(), double(),double(),double(),double()) #other columns
, numsymbols) )
ss <- IBmakeVarlengthStruct(atom.lst, 1L)
m <- mmap(paste(IBdatafeedShareDir,'/',streamfile,sep=''), ss ,
extractFUN=THEextractFUN)
#print(last(m[])) #Gets us the last non-NA timestamp entry!!
# Materialise the whole mapped file through THEextractFUN.
tmpxts<-m[]
# Assign labels: one <symbol>.<field> column name per symbol/field pair,
# in the same order the struct was declared.
xx <- thesymbols
yy <- c('BidSize', 'BidPrice', 'AskPrice', 'AskSize', 'Last', 'LastSize', 'Volume','Open','High','Low')
colnames(tmpxts)<- paste(rep(xx,each=length(yy)), rep(yy, length(xx)), sep='.')
# Store in .GlobalEnv as e.g. AAPL_GOOG_xts (side effect by design).
assign(paste( symbol<-paste(thesymbols, collapse='_') ,'_xts',sep=''), tmpxts, envir=.GlobalEnv)
#plot(tmpxts[, grep("Last$", colnames(tmpxts),ignore.case = TRUE)[1] ] )
#plot(tmpxts[, grep("Last$", colnames(tmpxts),ignore.case = TRUE)[2] ] )
print(symbol) ;print(periodicity(tmpxts))
munmap(m) #free up resources
}
#
/_checkMKTSharedBinXTS-N.R
no_license
parthasen/datafeedMKT-pub
R
false
false
2,974
r
################# # read all the *.bin files from directory, converts the data to xts and assigns it to the symbol derived from the filename. ################# library(mmap) library(xts) #library(quantmod) #only needed for plotting source('IBmakeVarlengthStruct.r') source('datafeed-config.r') allshares<-dir(IBdatafeedShareDir, pattern=glob2rx('*.bin')) allinfo<-dir(IBdatafeedShareDir, pattern=glob2rx('*.info')) stopifnot(length(allshares)==length(allinfo)) #~ #~ THEextractFUN<- function(x) na.trim( .xts( as.matrix( #~ cbind(x$BidSize, x$BidPrice, x$AskPrice, x$AskSize, x$Last, x$LastSize, x$Volume,x$Open,x$High,x$Low #~ ,x$BidSize2, x$BidPrice2, x$AskPrice2, x$AskSize2, x$Last2, x$LastSize2, x$Volume2,x$Open2,x$High2,x$Low2 #~ )) #~ , x$timestamp), sides='right', is.na='all') #~ # This function is agnostic to the number of columns in a datarow :)) THEextractFUN<- function(x) na.trim( .xts( do.call(cbind,x[-1]) , x[[1]]), sides='right', is.na='all') #for (tmpfile in allshares) { for (idx in 1:length(allshares)) { streamfile <- allshares[idx] infofile <- allinfo[idx] stopifnot( strsplit(streamfile,'\\.')[[1]][1]==strsplit(infofile,'\\.')[[1]][1] ) #tmpfile <- 'xxxxxxxxx.bin' #~ m <- mmap(paste(IBdatafeedShareDir,'/',tmpfile,sep=''), #~ struct(double() #~ ,double(), double(), double(), double(), double(), double(), double(),double(),double(),double() #~ ,double(), double(), double(), double(), double(), double(), double(),double(),double(),double() #~ ) #~ , extractFUN=THEextractFUN) # Get the stream's symbols from the .info file text info <- read.table(paste(IBdatafeedShareDir,'/',infofile,sep='')) thesymbols <- as.character(info[,1]) numsymbols <- length(thesymbols) atom.lst=c( list(double()) #timestamp ,rep( list(double(), double(), double(), double(), double(), double(), double(),double(),double(),double()) #other columns , numsymbols) ) ss <- IBmakeVarlengthStruct(atom.lst, 1L) m <- mmap(paste(IBdatafeedShareDir,'/',streamfile,sep=''), ss , 
extractFUN=THEextractFUN) #print(last(m[])) #Gets us the last non-NA timestamp entry!! tmpxts<-m[] #~ colnames(tmpxts)<-c("BidSize", "BidPrice", "AskPrice", "AskSize", "Last", "LastSize", "Volume","Open","High","Low" #~ ,"BidSize2", "BidPrice2", "AskPrice2", "AskSize2", "Last2", "LastSize2", "Volume2","Open2","High2","Low2") # Assign labels xx <- thesymbols yy <- c('BidSize', 'BidPrice', 'AskPrice', 'AskSize', 'Last', 'LastSize', 'Volume','Open','High','Low') colnames(tmpxts)<- paste(rep(xx,each=length(yy)), rep(yy, length(xx)), sep='.') # Store in .GlobalEnv assign(paste( symbol<-paste(thesymbols, collapse='_') ,'_xts',sep=''), tmpxts, envir=.GlobalEnv) #plot(tmpxts[, grep("Last$", colnames(tmpxts),ignore.case = TRUE)[1] ] ) #plot(tmpxts[, grep("Last$", colnames(tmpxts),ignore.case = TRUE)[2] ] ) print(symbol) ;print(periodicity(tmpxts)) munmap(m) #free up resources } #
# Train neuralnet classifiers predicting the full-time result (FTR: H/D/A)
# of Bundesliga 2001-02 matches from bookmaker odds: one model on all
# bookmakers, then six models each restricted to a positional slice of the
# odds columns.
# NOTE(review): dummy() comes from the 'dummies' package and CrossTable()
# from 'gmodels'; several of the libraries attached below (C50, rpart, tree,
# e1071, corrplot, Metrics, fastDummies) are not used in this script, and
# neuralnet is attached twice.
library(neuralnet)
library("caret")
library(corrplot)
library(C50)
library(dummies)
library(gmodels)
library(Metrics)
library(neuralnet)
library(plyr)
library(rpart)
library(tree)
library(e1071)
library(rpart.plot)
library(fastDummies)

################################## Load Files #############################################
# Absolute Windows path — the script only runs on the author's machine.
x <- read.csv(
  "C:\\Users\\User\\Documents\\Thesis\\Data\\Models\\League\\Bundesliga\\01-02.csv",
  stringsAsFactors = FALSE
)

################################# Clean Data ##############################################
# Coerce each bookmaker's Home/Draw/Away odds column to numeric
# (GB, IW, LB, SB, SY, WH = six bookmakers).
x$GBH <- as.numeric(x$GBH)
x$GBD <- as.numeric(x$GBD)
x$GBA <- as.numeric(x$GBA)
x$IWH <- as.numeric(x$IWH)
x$IWD <- as.numeric(x$IWD)
x$IWA <- as.numeric(x$IWA)
x$LBH <- as.numeric(x$LBH)
x$LBD <- as.numeric(x$LBD)
x$LBA <- as.numeric(x$LBA)
x$SBH <- as.numeric(x$SBH)
x$SBD <- as.numeric(x$SBD)
x$SBA <- as.numeric(x$SBA)
x$SYH <- as.numeric(x$SYH)
x$SYD <- as.numeric(x$SYD)
x$SYA <- as.numeric(x$SYA)
x$WHH <- as.numeric(x$WHH)
x$WHD <- as.numeric(x$WHD)
x$WHA <- as.numeric(x$WHA)
# Drop rows that lost values in the coercion above.
x <- na.exclude(x)

################################## Rename Columns #########################################
colnames(x)[1] <- "Season"

################################ Create Dummy Vars ########################################
# One-hot encode home and away team names (dummies::dummy()).
x <- cbind.data.frame(x, dummy(x$Home))
x <- cbind.data.frame(x, dummy(x$Away))

########################### Remove Cols After Dummy Vars ##################################
x$Home <- NULL
x$Away <- NULL
x$Season <- NULL
x$FTR <- as.factor(x$FTR)
x$date <- NULL

##################################### All Bookies #########################################
# Model 1: every odds column plus the team dummies; 60/40 train/test split.
NNM <- x
set.seed(123)
NNM.rows <- nrow(NNM)
NNM.sample <- sample(NNM.rows, NNM.rows * 0.6)
NN.train <- NNM[NNM.sample,]
NN.test <- NNM[-NNM.sample,]
# NOTE(review): linear.output = T for a factor target looks suspect, and
# T should be spelled TRUE; left unchanged to preserve behavior.
NN = neuralnet(FTR ~ ., NN.train, hidden=c(10,5), linear.output = T,stepmax = 1e6)
plot(NN)
# NN.test[-1] assumes FTR is column 1 of the frame — TODO confirm.
comp <- compute(NN, NN.test[-1])
pred.weights <- comp$net.result
# Pick the output unit with the highest activation per test row.
idx <- apply(pred.weights, 1, which.max)
# NOTE(review): pred is computed but never used; CrossTable uses idx.
pred <- c('A', 'D', 'H')[idx]
CrossTable(
  idx,
  NN.test$FTR,
  prop.c = FALSE,
  prop.r = FALSE,
  prop.chisq = FALSE
)

##########################################################################################
# Model 2: drop columns 5:19 (presumably keeping only the first bookmaker's
# odds plus the dummies — confirm against the CSV layout).
NNM2 <- x[-c(5:19)]
set.seed(123)
NNM2.rows <- nrow(NNM2)
NNM2.sample <- sample(NNM2.rows, NNM2.rows * 0.6)
NN2.train <- NNM2[NNM2.sample, ]
NN2.test <- NNM2[-NNM2.sample, ]
NN2 = neuralnet(FTR ~ ., NN2.train, hidden= 5, linear.output = FALSE,algorithm ="rprop+",learningrate = 0.1,stepmax = 1e9,act.fct = "logistic")
plot(NN2)
summary(NN2)
comp <- compute(NN2, NN2.test[-1])
pred.weights <- comp$net.result
idx <- apply(pred.weights, 1, which.max)
pred <- c('A', 'D', 'H')[idx]
CrossTable(
  idx,
  NN2.test$FTR,
  prop.c = FALSE,
  prop.r = FALSE,
  prop.chisq = FALSE
)

##########################################################################################
# Model 3: keep odds columns 5:7 only (drop 2:4 and 8:19).
NNM3 <- x[-c(2:4, 8:19)]
set.seed(123)
NNM3.rows <- nrow(NNM3)
NNM3.sample <- sample(NNM3.rows, NNM3.rows * 0.6)
NN3.train <- NNM3[NNM3.sample, ]
NN3.test <- NNM3[-NNM3.sample, ]
NN3 = neuralnet(FTR ~ ., NN3.train, hidden= 5, linear.output = FALSE,algorithm ="rprop+",learningrate = 0.1,stepmax = 1e9,act.fct = "logistic")
plot(NN3)
comp <- compute(NN3, NN3.test[-1])
pred.weights <- comp$net.result
idx <- apply(pred.weights, 1, which.max)
pred <- c('A', 'D', 'H')[idx]
CrossTable(
  idx,
  NN3.test$FTR,
  prop.c = FALSE,
  prop.r = FALSE,
  prop.chisq = FALSE
)

##########################################################################################
# Model 4: keep odds columns 8:10 only.
NNM4 <- x[-c(2:7, 11:19)]
set.seed(123)
NNM4.rows <- nrow(NNM4)
NNM4.sample <- sample(NNM4.rows, NNM4.rows * 0.6)
NN4.train <- NNM4[NNM4.sample, ]
NN4.test <- NNM4[-NNM4.sample, ]
NN4 = neuralnet(FTR ~ ., NN4.train, hidden= 5, linear.output = FALSE,algorithm ="rprop+",learningrate = 0.1,stepmax = 1e9,act.fct = "logistic")
plot(NN4)
comp <- compute(NN4, NN4.test[-1])
pred.weights <- comp$net.result
idx <- apply(pred.weights, 1, which.max)
pred <- c('A', 'D', 'H')[idx]
CrossTable(
  idx,
  NN4.test$FTR,
  prop.c = FALSE,
  prop.r = FALSE,
  prop.chisq = FALSE
)

##########################################################################################
# Model 5: keep odds columns 11:13 only.
NNM5 <- x[-c(2:10, 14:19)]
set.seed(123)
NNM5.rows <- nrow(NNM5)
NNM5.sample <- sample(NNM5.rows, NNM5.rows * 0.6)
NN5.train <- NNM5[NNM5.sample, ]
NN5.test <- NNM5[-NNM5.sample, ]
NN5 = neuralnet(FTR ~ ., NN5.train, hidden= 5, linear.output = FALSE,algorithm ="rprop+",learningrate = 0.1,stepmax = 1e9,act.fct = "logistic")
plot(NN5)
comp <- compute(NN5, NN5.test[-1])
pred.weights <- comp$net.result
idx <- apply(pred.weights, 1, which.max)
pred <- c('A', 'D', 'H')[idx]
CrossTable(
  idx,
  NN5.test$FTR,
  prop.c = FALSE,
  prop.r = FALSE,
  prop.chisq = FALSE
)

##########################################################################################
# Model 6: keep odds columns 14:16 only.
NNM6 <- x[-c(2:13, 17:19)]
set.seed(123)
NNM6.rows <- nrow(NNM6)
NNM6.sample <- sample(NNM6.rows, NNM6.rows * 0.6)
NN6.train <- NNM6[NNM6.sample, ]
NN6.test <- NNM6[-NNM6.sample, ]
NN6 = neuralnet(FTR ~ ., NN6.train, hidden= 5, linear.output = FALSE,algorithm ="rprop+",learningrate = 0.1,stepmax = 1e9,act.fct = "logistic")
plot(NN6)
comp <- compute(NN6, NN6.test[-1])
pred.weights <- comp$net.result
idx <- apply(pred.weights, 1, which.max)
pred <- c('A', 'D', 'H')[idx]
CrossTable(
  idx,
  NN6.test$FTR,
  prop.c = FALSE,
  prop.r = FALSE,
  prop.chisq = FALSE
)

##########################################################################################
# Model 7: keep odds columns 17:19 only.
NNM7 <- x[-c(2:16)]
set.seed(123)
NNM7.rows <- nrow(NNM7)
NNM7.sample <- sample(NNM7.rows, NNM7.rows * 0.6)
NN7.train <- NNM7[NNM7.sample, ]
NN7.test <- NNM7[-NNM7.sample, ]
NN7 = neuralnet(FTR ~ ., NN7.train, hidden= 5, linear.output = FALSE,algorithm ="rprop+",learningrate = 0.1,stepmax = 1e9,act.fct = "logistic")
plot(NN7)
comp <- compute(NN7, NN7.test[-1])
pred.weights <- comp$net.result
idx <- apply(pred.weights, 1, which.max)
pred <- c('A', 'D', 'H')[idx]
CrossTable(
  idx,
  NN7.test$FTR,
  prop.c = FALSE,
  prop.r = FALSE,
  prop.chisq = FALSE
)
/0-Implementation/R Files/NN/League/Bundesliga/01-02.R
no_license
Chanter08/Thesis
R
false
false
6,227
r
library(neuralnet) library("caret") library(corrplot) library(C50) library(dummies) library(gmodels) library(Metrics) library(neuralnet) library(plyr) library(rpart) library(tree) library(e1071) library(rpart.plot) library(fastDummies) ################################## Load Files ############################################# x <- read.csv( "C:\\Users\\User\\Documents\\Thesis\\Data\\Models\\League\\Bundesliga\\01-02.csv", stringsAsFactors = FALSE ) ################################# Clean Data ############################################## x$GBH <- as.numeric(x$GBH) x$GBD <- as.numeric(x$GBD) x$GBA <- as.numeric(x$GBA) x$IWH <- as.numeric(x$IWH) x$IWD <- as.numeric(x$IWD) x$IWA <- as.numeric(x$IWA) x$LBH <- as.numeric(x$LBH) x$LBD <- as.numeric(x$LBD) x$LBA <- as.numeric(x$LBA) x$SBH <- as.numeric(x$SBH) x$SBD <- as.numeric(x$SBD) x$SBA <- as.numeric(x$SBA) x$SYH <- as.numeric(x$SYH) x$SYD <- as.numeric(x$SYD) x$SYA <- as.numeric(x$SYA) x$WHH <- as.numeric(x$WHH) x$WHD <- as.numeric(x$WHD) x$WHA <- as.numeric(x$WHA) x <- na.exclude(x) ################################## Rename Columns ######################################### colnames(x)[1] <- "Season" ################################ Create Dummy Vars ######################################## x <- cbind.data.frame(x, dummy(x$Home)) x <- cbind.data.frame(x, dummy(x$Away)) ########################### Remove Cols After Dummy Vars ################################## x$Home <- NULL x$Away <- NULL x$Season <- NULL x$FTR <- as.factor(x$FTR) x$date <- NULL ##################################### All Bookies ######################################### NNM <- x set.seed(123) NNM.rows <- nrow(NNM) NNM.sample <- sample(NNM.rows, NNM.rows * 0.6) NN.train <- NNM[NNM.sample,] NN.test <- NNM[-NNM.sample,] NN = neuralnet(FTR ~ ., NN.train, hidden=c(10,5), linear.output = T,stepmax = 1e6) plot(NN) comp <- compute(NN, NN.test[-1]) pred.weights <- comp$net.result idx <- apply(pred.weights, 1, which.max) pred <- c('A', 'D', 'H')[idx] 
CrossTable( idx, NN.test$FTR, prop.c = FALSE, prop.r = FALSE, prop.chisq = FALSE ) ########################################################################################################## NNM2 <- x[-c(5:19)] set.seed(123) NNM2.rows <- nrow(NNM2) NNM2.sample <- sample(NNM2.rows, NNM2.rows * 0.6) NN2.train <- NNM2[NNM2.sample, ] NN2.test <- NNM2[-NNM2.sample, ] NN2 = neuralnet(FTR ~ ., NN2.train, hidden= 5, linear.output = FALSE,algorithm ="rprop+",learningrate = 0.1,stepmax = 1e9,act.fct = "logistic") plot(NN2) summary(NN2) comp <- compute(NN2, NN2.test[-1]) pred.weights <- comp$net.result idx <- apply(pred.weights, 1, which.max) pred <- c('A', 'D', 'H')[idx] CrossTable( idx, NN2.test$FTR, prop.c = FALSE, prop.r = FALSE, prop.chisq = FALSE ) ########################################################################################################## NNM3 <- x[-c(2:4, 8:19)] set.seed(123) NNM3.rows <- nrow(NNM3) NNM3.sample <- sample(NNM3.rows, NNM3.rows * 0.6) NN3.train <- NNM3[NNM3.sample, ] NN3.test <- NNM3[-NNM3.sample, ] NN3 = neuralnet(FTR ~ ., NN3.train, hidden= 5, linear.output = FALSE,algorithm ="rprop+",learningrate = 0.1,stepmax = 1e9,act.fct = "logistic") plot(NN3) comp <- compute(NN3, NN3.test[-1]) pred.weights <- comp$net.result idx <- apply(pred.weights, 1, which.max) pred <- c('A', 'D', 'H')[idx] CrossTable( idx, NN3.test$FTR, prop.c = FALSE, prop.r = FALSE, prop.chisq = FALSE ) ########################################################################################################## NNM4 <- x[-c(2:7, 11:19)] set.seed(123) NNM4.rows <- nrow(NNM4) NNM4.sample <- sample(NNM4.rows, NNM4.rows * 0.6) NN4.train <- NNM4[NNM4.sample, ] NN4.test <- NNM4[-NNM4.sample, ] NN4 = neuralnet(FTR ~ ., NN4.train, hidden= 5, linear.output = FALSE,algorithm ="rprop+",learningrate = 0.1,stepmax = 1e9,act.fct = "logistic") plot(NN4) comp <- compute(NN4, NN4.test[-1]) pred.weights <- comp$net.result idx <- apply(pred.weights, 1, which.max) pred <- c('A', 'D', 'H')[idx] 
CrossTable( idx, NN4.test$FTR, prop.c = FALSE, prop.r = FALSE, prop.chisq = FALSE ) ########################################################################################################## NNM5 <- x[-c(2:10, 14:19)] set.seed(123) NNM5.rows <- nrow(NNM5) NNM5.sample <- sample(NNM5.rows, NNM5.rows * 0.6) NN5.train <- NNM5[NNM5.sample, ] NN5.test <- NNM5[-NNM5.sample, ] NN5 = neuralnet(FTR ~ ., NN5.train, hidden= 5, linear.output = FALSE,algorithm ="rprop+",learningrate = 0.1,stepmax = 1e9,act.fct = "logistic") plot(NN5) comp <- compute(NN5, NN5.test[-1]) pred.weights <- comp$net.result idx <- apply(pred.weights, 1, which.max) pred <- c('A', 'D', 'H')[idx] CrossTable( idx, NN5.test$FTR, prop.c = FALSE, prop.r = FALSE, prop.chisq = FALSE ) ########################################################################################################## NNM6 <- x[-c(2:13, 17:19)] set.seed(123) NNM6.rows <- nrow(NNM6) NNM6.sample <- sample(NNM6.rows, NNM6.rows * 0.6) NN6.train <- NNM6[NNM6.sample, ] NN6.test <- NNM6[-NNM6.sample, ] NN6 = neuralnet(FTR ~ ., NN6.train, hidden= 5, linear.output = FALSE,algorithm ="rprop+",learningrate = 0.1,stepmax = 1e9,act.fct = "logistic") plot(NN6) comp <- compute(NN6, NN6.test[-1]) pred.weights <- comp$net.result idx <- apply(pred.weights, 1, which.max) pred <- c('A', 'D', 'H')[idx] CrossTable( idx, NN6.test$FTR, prop.c = FALSE, prop.r = FALSE, prop.chisq = FALSE ) ########################################################################################################## NNM7 <- x[-c(2:16)] set.seed(123) NNM7.rows <- nrow(NNM7) NNM7.sample <- sample(NNM7.rows, NNM7.rows * 0.6) NN7.train <- NNM7[NNM7.sample, ] NN7.test <- NNM7[-NNM7.sample, ] NN7 = neuralnet(FTR ~ ., NN7.train, hidden= 5, linear.output = FALSE,algorithm ="rprop+",learningrate = 0.1,stepmax = 1e9,act.fct = "logistic") plot(NN7) comp <- compute(NN7, NN7.test[-1]) pred.weights <- comp$net.result idx <- apply(pred.weights, 1, which.max) pred <- c('A', 'D', 'H')[idx] CrossTable( 
idx, NN7.test$FTR, prop.c = FALSE, prop.r = FALSE, prop.chisq = FALSE )
load("data/nba_hustle.RData") load("data/nba_advanced.RData") nba_data_all <- full_join(nba_data, nba_hustle_data, by = c("PLAYERID", "Year", "GP", "TEAM")) save(nba_data_all, file = "data/nba_data_all.RData")
/nba_defense_combiner.R
no_license
alee2019/moneyballproject
R
false
false
212
r
load("data/nba_hustle.RData") load("data/nba_advanced.RData") nba_data_all <- full_join(nba_data, nba_hustle_data, by = c("PLAYERID", "Year", "GP", "TEAM")) save(nba_data_all, file = "data/nba_data_all.RData")
block_multiple <- function(A, B, ii, sortcol, sortcol2) { result <- NULL object <- NULL for (i in 1:ncol(A)) { if(sum(sortcol[ii]==i)==0) object <- rep(0,nrow(A)) if(sum(sortcol[ii]==i)>0) object <- A[,sortcol2[ii][which(sortcol[ii]==i)]]%*%as.matrix(B[which(sortcol[ii]==i),]) result <- c(result,object) } return(as.vector(result)) }
/ES/R/block_multiple.R
no_license
ingted/R-Examples
R
false
false
350
r
block_multiple <- function(A, B, ii, sortcol, sortcol2) { result <- NULL object <- NULL for (i in 1:ncol(A)) { if(sum(sortcol[ii]==i)==0) object <- rep(0,nrow(A)) if(sum(sortcol[ii]==i)>0) object <- A[,sortcol2[ii][which(sortcol[ii]==i)]]%*%as.matrix(B[which(sortcol[ii]==i),]) result <- c(result,object) } return(as.vector(result)) }
library(HKprocess) ### Name: inferH ### Title: Posterior distribution of the H parameter of the HKp using an ### Accept-Reject algorithm. ### Aliases: inferH ### Keywords: models ### ** Examples # Posterior distribution of the H parameter of the HKp for the Nile time series. set.seed(12345) samp.sim <- inferH(Nile,500) hist(samp.sim,breaks = 20,main = "Histogram of H",xlab = "H")
/data/genthat_extracted_code/HKprocess/examples/inferH.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
395
r
library(HKprocess) ### Name: inferH ### Title: Posterior distribution of the H parameter of the HKp using an ### Accept-Reject algorithm. ### Aliases: inferH ### Keywords: models ### ** Examples # Posterior distribution of the H parameter of the HKp for the Nile time series. set.seed(12345) samp.sim <- inferH(Nile,500) hist(samp.sim,breaks = 20,main = "Histogram of H",xlab = "H")