blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e9d615ff3cbcf61c65403b5958c2187ec6bdf43c | 6c1eb0bf1a989423411292676a78ebed3160b53d | /RshinyApp/ui.R | ca6123e867ec341a40f5f3e1feb320202a028518 | [] | no_license | frankjing11/developing_data_products_assignment | 7db7f6c8517acad20efba34ea3a140dde6d1659b | a7cbe5c0b5f17668b26bd5974ecb79dd0fcea636 | refs/heads/master | 2020-04-05T23:15:12.397206 | 2016-08-12T21:41:07 | 2016-08-12T21:41:07 | 65,585,005 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 403 | r | ui.R |
library(shiny)

# Shiny UI definition: sidebar layout for plotting and (optionally)
# forecasting one air-quality measurement series.
# NOTE(review): pageWithSidebar() is deprecated in current Shiny in favor
# of fluidPage()/sidebarLayout(); kept as-is to preserve behavior.
shinyUI(pageWithSidebar(
# Page header shown at the top of the app.
headerPanel("Forecasting for Air Quality Measurements"),
sidebarPanel(
# Input "vari": which measurement column to plot/forecast
# (names match the columns of the airquality-style dataset).
selectInput("vari", "Variable:",
c("Ozone","Solar.R","Wind","Temp")),
# Input "Prediction": toggle overlaying a forecast on the plot
# (default off).
checkboxInput("Prediction", "With Prediction", FALSE)
),
mainPanel(
# Outputs rendered by server.R: dynamic title, time-series plot,
# and the printed model fit.
h3(textOutput("title")),
plotOutput("tsPlot"),
verbatimTextOutput("fit")
)
)) |
0c65308008c1b046a02ca11d0493a47d8ef1a521 | 29585dff702209dd446c0ab52ceea046c58e384e | /SCGLR/R/print.r | d7f619958dd22db4f33812f8bb087d1af2d4fd76 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 519 | r | print.r | #' @export
#' @title Print SCGLR object
#' @description Prints inertia per component and deviance for each Y.
#' @method print SCGLR
#' @param x object of class 'SCGLR', usually a result of running \code{\link{scglr}}.
#' @param \dots Not used.
print.SCGLR <- function(x, ...) {
  # Rebuild the original call as one string; multi-line deparsed calls
  # are joined with newlines so the call prints exactly as written.
  call_txt <- paste(deparse(x$call), sep = "\n", collapse = "\n")
  cat("\nCall: ", call_txt, "\n", sep = "")
  # Inertia captured per component.
  cat("\nInertia:\n")
  print.default(x$inertia, print.gap = 2)
  # Deviance of the fit for each response Y.
  cat("\nDeviance:\n")
  print.default(x$deviance, print.gap = 2)
  cat("\n")
  # Return the object invisibly, as print methods conventionally do.
  invisible(x)
}
|
5d5a37e8db39d177e2bf186ea0373c4f7856328b | 52243f1f5495f9a742d373a8168215126605c034 | /eastMaui/scripts/Water_in_basins.R | f551c802e814dd03f94347ecbee22cc9a2ce9d7a | [] | no_license | dwalke44/EastMauiClean | be5f54de705a58c7586bf720677d978f86b4edee | 0fdfe8e59e1b5aabd7b6f103bd0bcc125159748c | refs/heads/master | 2020-03-24T04:34:11.664399 | 2018-12-02T18:23:43 | 2018-12-02T18:23:43 | 142,457,573 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 26,239 | r | Water_in_basins.R | #' @export
# Function to calculate amount of water remaining in basins after diversion
# convert all nodeInputs[..., c(8:11)] to nodeInputs[..., 2]
# Compute the water remaining in each basin after diversions.
#
# Args:
#   nodesInput: matrix of per-node parameters. Column 3 is multiplied by an
#     upstream basin's diverted flow (a pass-through/diversion fraction);
#     column 13 is added at the nodes labelled "spring"/"sink" below
#     (a flow offset). Needs at least 310 rows and 13 columns.
#     TODO(review): confirm exact semantics/units of columns 3 and 13.
#   waterInput: matrix of basin inflows; column 3 holds each basin's flow.
#     Needs at least 309 rows (row 309 feeds basin E350).
#
# Returns: a one-row data.frame, one column per basin (E001, E002, ...),
#   with the post-diversion flow of each basin. Flows accumulate
#   downstream: each basin gets its own inflow plus fractions of the
#   upstream basins' diverted flows. Spring/sink contributions (spsk)
#   are floored at zero before being added downstream.
#
# BUGFIX: the E058 spring/sink clamp previously read spsk$E028s, a list
# element that is never defined (NULL), making ifelse() error at runtime;
# it now reads spsk$E058s like every other clamp.
basinWater.fun = function(nodesInput, waterInput){
diverted = list()
spsk = list()
diverted$E003 = waterInput[3, 3]
diverted$E004 = waterInput[4, 3]
diverted$E002 = waterInput[2, 3] + diverted$E003*nodesInput[228, 3] + diverted$E004*nodesInput[196, 3]
# node 49 = sink
spsk$E001s = (diverted$E002*nodesInput[49, 3] + nodesInput[49, 13])
spsk$E001 = ifelse(spsk$E001s<0, 0, spsk$E001s)
diverted$E001 = waterInput[1,3] + spsk$E001
# --------------------------------------------------------------------
diverted$E008 = waterInput[8, 3]
diverted$E009 = waterInput[9, 3]
diverted$E010 = waterInput[10, 3]
diverted$E007 = waterInput[7, 3] + diverted$E009*nodesInput[192, 3] + diverted$E008*nodesInput[193, 3] + diverted$E010*nodesInput[194, 3]
diverted$E006 = waterInput[6, 3] + diverted$E007*nodesInput[229, 3]
# node 50 = spring
spsk$E005s = (nodesInput[50,13] + diverted$E006*nodesInput[50,3])
spsk$E005 = ifelse(spsk$E005s<0, 0, spsk$E005s)
diverted$E005 = waterInput[5, 3] + spsk$E005
# --------------------------------------------------------------------
diverted$E014 = waterInput[14, 3]
diverted$E015 = waterInput[15, 3]
diverted$E016 = waterInput[16, 3] + diverted$E015*nodesInput[187, 3]
diverted$E013 = waterInput[13, 3] + diverted$E016*nodesInput[188, 3]
diverted$E012 = waterInput[12, 3] +
  diverted$E013*nodesInput[195, 3] +
  diverted$E014*nodesInput[186, 3]
# node 51 = spring
spsk$E011s = (nodesInput[51,13] + diverted$E012*nodesInput[51,3])
spsk$E011 = ifelse(spsk$E011s<0, 0, spsk$E011s)
diverted$E011 = waterInput[11, 3] + spsk$E011
# --------------------------------------------------------------------
diverted$E018 = waterInput[18, 3]
diverted$E017 = waterInput[17, 3] + diverted$E018*nodesInput[230, 3]
# ---------------------------------------------------------------------
diverted$E020 = waterInput[20, 3]
diverted$E021 = waterInput[21, 3]
diverted$E022 = waterInput[22, 3]
diverted$E023 = waterInput[23, 3]
diverted$E019 = waterInput[19, 3] +
  diverted$E023*nodesInput[184, 3]+
  diverted$E022*nodesInput[185, 3]+
  diverted$E021*nodesInput[231, 3]+
  diverted$E020*nodesInput[232, 3]
# -----------------------------------------------------------------------
diverted$E025 = waterInput[25, 3]
diverted$E026 = waterInput[26, 3]
diverted$E027 = waterInput[27, 3]
diverted$E028 = waterInput[28, 3]
diverted$E024 = waterInput[24, 3] + diverted$E025*nodesInput[233, 3] + diverted$E026*nodesInput[234, 3] + diverted$E027*nodesInput[182, 3]+ diverted$E028*nodesInput[183, 3]
# -----------------------------------------------------------------------
diverted$E030 = waterInput[30, 3]
diverted$E031 = waterInput[31, 3]
diverted$E033 = waterInput[33, 3]
diverted$E034 = waterInput[34, 3]
diverted$E032 = waterInput[32, 3] + diverted$E033*nodesInput[180, 3]
diverted$E029 = waterInput[29, 3] + diverted$E030*nodesInput[237, 3] + diverted$E031*nodesInput[236, 3] + diverted$E032*nodesInput[235, 3]+ diverted$E034*nodesInput[191, 3]
# -----------------------------------------------------------------------
diverted$E036 = waterInput[36, 3]
diverted$E035 = waterInput[35, 3] + diverted$E036*nodesInput[190, 3]
# -----------------------------------------------------------------------
diverted$E038 = waterInput[38, 3]
diverted$E037 = waterInput[37, 3] + diverted$E038*nodesInput[223, 3]
# -----------------------------------------------------------------------
diverted$E040 = waterInput[40, 3]
diverted$E041 = waterInput[41, 3]
diverted$E042 = waterInput[42, 3]
diverted$E043 = waterInput[43, 3]
diverted$E044 = waterInput[44 ,3] + diverted$E042*nodesInput[238, 3]
diverted$E039 = waterInput[39, 3] +
  diverted$E040*nodesInput[218, 3] +
  diverted$E041*nodesInput[220, 3] +
  diverted$E043*nodesInput[227, 3] +
  diverted$E044*nodesInput[217, 3]
# -----------------------------------------------------------------------
diverted$E047 = waterInput[47 ,3]
diverted$E048 = waterInput[48 ,3]
diverted$E049 = waterInput[49 ,3]
diverted$E050 = waterInput[50 ,3]
diverted$E051 = waterInput[51 ,3]
diverted$E052 = waterInput[52 ,3]
diverted$E053 = waterInput[53 ,3]
diverted$E054 = waterInput[54 ,3]
# basin 54 has two diversions on it
spsk$E054s = waterInput[54, 3]*nodesInput[224, 3]*nodesInput[216, 3]
spsk$E054 = ifelse(spsk$E054s<0, 0, spsk$E054s)
diverted$E046 = waterInput[46 ,3] + spsk$E054 +
  diverted$E053*nodesInput[225,3] +
  diverted$E051*nodesInput[243, 3] +
  diverted$E050*nodesInput[242, 3] +
  diverted$E049*nodesInput[241, 3] +
  diverted$E047*nodesInput[240, 3] +
  diverted$E048*nodesInput[239, 3] +
  diverted$E052*nodesInput[222, 3]
# (diverted$E002*nodesInput[49, 3] + nodesInput[49, 13])
# node 54 = sink
spsk$E046s = waterInput[46, 3]*nodesInput[54, 3] + nodesInput[54,13]
spsk$E046 = ifelse(spsk$E046s<0 , 0, spsk$E046s)
diverted$E045 = waterInput[45 ,3] + spsk$E046
# -----------------------------------------------------------------------
diverted$E059 = waterInput[59 ,3]
diverted$E060 = waterInput[60 ,3]
diverted$E061 = waterInput[61 ,3]
diverted$E062 = waterInput[62 ,3]
diverted$E063 = waterInput[63 ,3]
diverted$E064 = waterInput[64 ,3]
diverted$E058 = waterInput[58 ,3] +
  diverted$E063*nodesInput[214, 3] +
  diverted$E061*nodesInput[215, 3] +
  diverted$E060*nodesInput[208, 3] +
  diverted$E059*nodesInput[207, 3] +
  diverted$E062*nodesInput[205, 3]
diverted$E056 = waterInput[56 ,3] + diverted$E064*nodesInput[226, 3]
# node 53 = sink
spsk$E058s = diverted$E058*nodesInput[53, 3] + nodesInput[53, 13]
# BUGFIX: was spsk$E028s (undefined -> NULL), which made ifelse() error.
spsk$E058 = ifelse(spsk$E058s<0, 0, spsk$E058s)
diverted$E057 = waterInput[57 ,3] + spsk$E058
# node 52 = sink.
spsk$E056s = diverted$E056*nodesInput[52,3] + nodesInput[52,13]
spsk$E056 = ifelse(spsk$E056s<0, 0, spsk$E056s)
diverted$E055 = waterInput[55 ,3] + spsk$E056
# -----------------------------------------------------------------------
diverted$E066 = waterInput[66 ,3]
diverted$E067 = waterInput[67 ,3]
diverted$E068 = waterInput[68 ,3]
diverted$E069 = waterInput[69 ,3]
diverted$E070 = waterInput[70 ,3]
diverted$E065 = waterInput[65 ,3] +
  diverted$E066*nodesInput[204, 3] +
  diverted$E067*nodesInput[245, 3] +
  diverted$E068*nodesInput[247, 3] +
  diverted$E069*nodesInput[246, 3] +
  diverted$E070*nodesInput[221, 3]
# -----------------------------------------------------------------------
diverted$E073 = waterInput[73 ,3]
diverted$E074 = waterInput[74 ,3]
diverted$E075 = waterInput[75 ,3]
diverted$E076 = waterInput[76 ,3]
# basin 77 has two diversions
diverted$E077 = waterInput[77 ,3]
spsk$E077s = waterInput[77 ,3]*nodesInput[248,3]*nodesInput[200, 3]
diverted$E078 = waterInput[78 ,3]
diverted$E072 = waterInput[72 ,3] + diverted$E075*nodesInput[206, 3]
diverted$E071 = waterInput[71 ,3] +
  diverted$E072*nodesInput[281, 3] +
  diverted$E073*nodesInput[249, 3] +
  diverted$E074*nodesInput[198, 3] +
  diverted$E076*nodesInput[199, 3] +
  spsk$E077s
# -----------------------------------------------------------------------
diverted$E080 = waterInput[80 ,3]
diverted$E083 = waterInput[83 ,3]
diverted$E084 = waterInput[84 ,3]
diverted$E087 = waterInput[87 ,3]
diverted$E088 = waterInput[88 ,3]
diverted$E089 = waterInput[89 ,3]
diverted$E086 = waterInput[86 ,3] + diverted$E088*nodesInput[251, 3]
diverted$E085 = waterInput[85 ,3] +
  diverted$E089*nodesInput[197, 3] +
  diverted$E086*nodesInput[252, 3] +
  diverted$E087*nodesInput[250, 3]
diverted$E082 = waterInput[82 ,3] +
  diverted$E083*nodesInput[280, 3] +
  diverted$E084*nodesInput[278, 3] +
  diverted$E085*nodesInput[111, 3]
diverted$E081 = waterInput[81 ,3] + diverted$E082*nodesInput[113, 3]
diverted$E079 = waterInput[79 ,3] +
  diverted$E080*nodesInput[284, 3] +
  diverted$E081*nodesInput[235, 3]
# -----------------------------------------------------------------------
diverted$E093 = waterInput[93 ,3]
diverted$E092 = waterInput[92 ,3] + diverted$E093*nodesInput[72, 3]
diverted$E091 = waterInput[91 ,3]+ diverted$E092*nodesInput[279, 3]
diverted$E090 = waterInput[90 ,3] + diverted$E091*nodesInput[119, 3]
# -----------------------------------------------------------------------
diverted$E096 = waterInput[96 ,3]
diverted$E097 = waterInput[97 ,3]
diverted$E098 = waterInput[98 ,3]
diverted$E102 = waterInput[102,3]
diverted$E100 = waterInput[100,3]
diverted$E095 = waterInput[95 ,3] +
  diverted$E096*nodesInput[256, 3] +
  diverted$E097*nodesInput[255, 3] +
  diverted$E098*nodesInput[254, 3]
diverted$E101 = waterInput[101,3] + diverted$E102*nodesInput[73, 3]
diverted$E099 = waterInput[99 ,3] +
  diverted$E100*nodesInput[253, 3] +
  diverted$E101*nodesInput[201, 3]
diverted$E094 = waterInput[94 ,3] +
  diverted$E095*nodesInput[109, 3] +
  diverted$E099*nodesInput[100, 3]
# -----------------------------------------------------------------------
diverted$E104 = waterInput[104,3]
diverted$E105 = waterInput[105,3]
diverted$E106 = waterInput[106,3]
diverted$E107 = waterInput[107,3]
diverted$E350 = waterInput[309, 3]
diverted$E103 = waterInput[103,3] +
  diverted$E104*nodesInput[289, 3] +
  diverted$E105*nodesInput[286, 3] +
  diverted$E350*nodesInput[287, 3] +
  diverted$E106*nodesInput[288, 3] +
  diverted$E107*nodesInput[112, 3]
# -----------------------------------------------------------------------
diverted$E110 = waterInput[110,3]
diverted$E111 = waterInput[111,3]
diverted$E114 = waterInput[114,3]
diverted$E115 = waterInput[115,3]
diverted$E112 = waterInput[112,3] + diverted$E114*nodesInput[71, 3]
diverted$E113 = waterInput[113,3] + diverted$E115*nodesInput[139, 3]
diverted$E109 = waterInput[109,3] +
  diverted$E113*nodesInput[74, 3] +
  diverted$E112*nodesInput[114, 3] +
  diverted$E111*nodesInput[258, 3] +
  diverted$E110*nodesInput[257, 3]
diverted$E108 = waterInput[108,3] + diverted$E109*nodesInput[132, 3]
# -----------------------------------------------------------------------
diverted$E117 = waterInput[117,3]
diverted$E125 = waterInput[125, 3]
diverted$E123 = waterInput[123, 3]
diverted$E124 = waterInput[124, 3] + diverted$E125*nodesInput[61, 3]
diverted$E122 = waterInput[122, 3] + diverted$E123*nodesInput[67, 3]
diverted$E121 = waterInput[121, 3] + diverted$E124*nodesInput[68, 3]
diverted$E120 = waterInput[120, 3] +
  diverted$E122*nodesInput[116, 3] +
  diverted$E121*nodesInput[115, 3]
diverted$E119 = waterInput[119, 3] + diverted$E120*nodesInput[135, 3]
diverted$E118 = waterInput[118,3] + diverted$E119*nodesInput[130, 3]
diverted$E116 = waterInput[116,3] +
  diverted$E117*nodesInput[290, 3] +
  diverted$E118*nodesInput[110, 3]
# -----------------------------------------------------------------------
diverted$E127 = waterInput[127, 3]
diverted$E126 = waterInput[126, 3] + diverted$E127*nodesInput[128, 3]
# -----------------------------------------------------------------------
diverted$E132 = waterInput[132, 3]
diverted$E131 = waterInput[131, 3] + diverted$E132*nodesInput[60, 3]
diverted$E130 = waterInput[130, 3] + diverted$E131*nodesInput[282, 3]
diverted$E129 = waterInput[129, 3] + diverted$E130*nodesInput[136, 3]
diverted$E128 = waterInput[128, 3]+ diverted$E129*nodesInput[101, 3]
# -----------------------------------------------------------------------
diverted$E146 = waterInput[146, 3]
diverted$E147 = waterInput[147, 3]
diverted$E148 = waterInput[148, 3]
diverted$E149 = waterInput[149, 3]
diverted$E144 = waterInput[144, 3]
diverted$E141 = waterInput[141, 3]
diverted$E134 = waterInput[134, 3]
diverted$E135 = waterInput[135, 3]
diverted$E145 = waterInput[145, 3] + diverted$E146*nodesInput[56, 3]
diverted$E143 = waterInput[143, 3] + diverted$E147*nodesInput[62, 3]
diverted$E142 = waterInput[142, 3] + diverted$E148*nodesInput[83, 3]
diverted$E140 = waterInput[140, 3] +
  diverted$E145*nodesInput[164, 3]+
  diverted$E144*nodesInput[165, 3]+
  diverted$E143*nodesInput[166, 3]+
  diverted$E149*nodesInput[81, 3]
diverted$E139 = waterInput[139, 3] +
  diverted$E141*nodesInput[167, 3] +
  diverted$E142*nodesInput[179, 3]
diverted$E138 = waterInput[138, 3] + diverted$E139*nodesInput[203, 3]
diverted$E137 = waterInput[137, 3] + diverted$E140*nodesInput[204, 3]
diverted$E136 = waterInput[136, 3] + diverted$E138*nodesInput[293, 3]
diverted$E133 = waterInput[133, 3] +
  diverted$E134*nodesInput[291, 3] +
  diverted$E135*nodesInput[102, 3] +
  diverted$E137*nodesInput[103, 3] +
  diverted$E136*nodesInput[106, 3]
# -----------------------------------------------------------------------
diverted$E156 = waterInput[156, 3]
diverted$E153 = waterInput[153, 3]
diverted$E154 = waterInput[154, 3]
diverted$E151 = waterInput[151, 3]
diverted$E155 = waterInput[155, 3] + diverted$E156*nodesInput[88, 3]
diverted$E152 = waterInput[152, 3] +
  diverted$E153*nodesInput[212, 3] +
  diverted$E154*nodesInput[294, 3] +
  diverted$E155*nodesInput[176, 3]
diverted$E150 = waterInput[150, 3] +
  diverted$E151*nodesInput[292, 3] +
  diverted$E152*nodesInput[107,3]
# -----------------------------------------------------------------------
diverted$E165 = waterInput[165, 3]
diverted$E166 = waterInput[166, 3]
diverted$E167 = waterInput[167, 3]
diverted$E163 = waterInput[163, 3] + diverted$E166*nodesInput[77, 3]
diverted$E164 = waterInput[164, 3] + diverted$E165*nodesInput[97, 3]
diverted$E161 = waterInput[161, 3] + diverted$E167*nodesInput[78, 3]
diverted$E160 = waterInput[160, 3] + diverted$E161*nodesInput[171, 3]
diverted$E162 = waterInput[162, 3] +
  diverted$E163*nodesInput[172, 3] +
  diverted$E164*nodesInput[173, 3]
diverted$E159 = waterInput[159, 3] +
  diverted$E160*nodesInput[159, 3] +
  diverted$E162*nodesInput[75, 3]
diverted$E158 = waterInput[158, 3]+ diverted$E159*nodesInput[94, 3]
diverted$E157 = waterInput[157, 3]+ diverted$E158*nodesInput[202, 3]
# -----------------------------------------------------------------------
diverted$E174 = waterInput[174, 3]
diverted$E175 = waterInput[175, 3]
# added new node 310 for basin 173
diverted$E173 = waterInput[173, 3] + diverted$E175*nodesInput[92, 3]
diverted$E172 = waterInput[172, 3] + diverted$E174*nodesInput[89, 3]
diverted$E171 = waterInput[171, 3] +
  diverted$E172*nodesInput[177, 3] +
  diverted$E173*nodesInput[310, 3]
diverted$E170 = waterInput[170, 3] + diverted$E171*nodesInput[95, 3]
diverted$E169 = waterInput[169, 3] + diverted$E170*nodesInput[69, 3]
diverted$E168 = waterInput[168, 3] + diverted$E169*nodesInput[154, 3]
# -----------------------------------------------------------------------
diverted$E178 = waterInput[178, 3]
diverted$E177 = waterInput[177, 3] + diverted$E178*nodesInput[178, 3]
diverted$E176 = waterInput[176, 3] + diverted$E177*nodesInput[86, 3]
# -----------------------------------------------------------------------
diverted$E188 = waterInput[188, 3]
diverted$E186 = waterInput[186, 3]
diverted$E183 = waterInput[183, 3]
diverted$E184 = waterInput[184, 3]
diverted$E182 = waterInput[182, 3] + diverted$E183*nodesInput[263, 3]
diverted$E185 = waterInput[185, 3] + diverted$E188*nodesInput[93, 3]
diverted$E187 = waterInput[187, 3] + diverted$E186*nodesInput[259, 3]
diverted$E181 = waterInput[181, 3] +
  diverted$E184*nodesInput[261, 3] +
  diverted$E185*nodesInput[149, 3] +
  diverted$E187*nodesInput[260, 3] +
  diverted$E182*nodesInput[262, 3]
diverted$E180 = waterInput[180, 3] + diverted$E181*nodesInput[55, 3]
diverted$E179 = waterInput[179, 3] + diverted$E180*nodesInput[134, 3]
# -----------------------------------------------------------------------
diverted$E191 = waterInput[191, 3]
diverted$E193 = waterInput[193, 3]
diverted$E192 = waterInput[192, 3] + diverted$E193*nodesInput[265, 3]
diverted$E202 = waterInput[202, 3]
diverted$E201 = waterInput[201, 3]+ diverted$E202*nodesInput[98, 3]
diverted$E200 = waterInput[200, 3] + diverted$E201*nodesInput[168, 3]
diverted$E195 = waterInput[195, 3]
diverted$E196 = waterInput[196, 3]
diverted$E197 = waterInput[197, 3]
diverted$E199 = waterInput[199, 3]
diverted$E198 = waterInput[198, 3] + diverted$E199*nodesInput[264, 3]
diverted$E194 = waterInput[194, 3] +
  diverted$E195*nodesInput[297, 3] +
  diverted$E196*nodesInput[296, 3] +
  diverted$E197*nodesInput[146, 3] +
  diverted$E198*nodesInput[66, 3] +
  diverted$E200*nodesInput[144, 3]
diverted$E190 = waterInput[190, 3] +
  diverted$E191*nodesInput[299, 3] +
  diverted$E192*nodesInput[298, 3]
diverted$E189 = waterInput[189, 3] +
  diverted$E194*nodesInput[123, 3] +
  diverted$E190*nodesInput[131, 3]
# -----------------------------------------------------------------------
diverted$E203 = waterInput[203, 3]
# -----------------------------------------------------------------------
diverted$E210 = waterInput[210, 3]
diverted$E207 = waterInput[207, 3]
diverted$E209 = waterInput[209, 3]+ diverted$E210*nodesInput[266, 3]
diverted$E208 = waterInput[208, 3] + diverted$E209*nodesInput[108, 3]
diverted$E205 = waterInput[205, 3] +
  diverted$E208*nodesInput[142, 3] +
  diverted$E207*nodesInput[143, 3]
diverted$E204 = waterInput[204, 3] + diverted$E205*nodesInput[122, 3]
# -----------------------------------------------------------------------
# added new node 309 for basin 211
diverted$E213 = waterInput[213, 3]
diverted$E214 = waterInput[214, 3]
diverted$E212 = waterInput[212, 3] +
  diverted$E214*nodesInput[174, 3] +
  diverted$E213*nodesInput[175, 3]
diverted$E211 = waterInput[211, 3] + diverted$E212*nodesInput[120,3]
# -----------------------------------------------------------------------
diverted$E215 = waterInput[215, 3]
# -----------------------------------------------------------------------
diverted$E227 = waterInput[227, 3]
diverted$E228 = waterInput[228, 3]
diverted$E229 = waterInput[229, 3]
diverted$E218 = waterInput[218, 3]
diverted$E219 = waterInput[219, 3]
diverted$E223 = waterInput[223, 3]
diverted$E226 = waterInput[226, 3] +
  diverted$E223*nodesInput[268, 3] +
  diverted$E229*nodesInput[58, 3]
diverted$E225 = waterInput[225, 3] + diverted$E227*nodesInput[57, 3]
# assigned two diversions to basin 225
diverted$E224 = waterInput[224, 3]+
  diverted$E225*nodesInput[148, 3]*nodesInput[267, 3] +
  diverted$E226*nodesInput[138, 3]
diverted$E221 = waterInput[221, 3] + diverted$E224*nodesInput[158, 3]
diverted$E222 = waterInput[222, 3] + diverted$E228*nodesInput[59, 3]
diverted$E220 = waterInput[220, 3] + diverted$E221*nodesInput[140, 3]
# NOTE(review): node 141 is applied to both E219 and E220 below;
# verify that one of them should not be a different node index.
diverted$E217 = waterInput[217, 3] +
  diverted$E218*nodesInput[300, 3] +
  diverted$E219*nodesInput[141, 3] +
  diverted$E220*nodesInput[141, 3] +
  diverted$E222*nodesInput[147, 3]
diverted$E216 = waterInput[216, 3] + diverted$E217*nodesInput[121, 3]
# -----------------------------------------------------------------------
diverted$E237 = waterInput[237, 3]
diverted$E238 = waterInput[238, 3]
diverted$E232 = waterInput[232, 3]
diverted$E233 = waterInput[233, 3]
diverted$E235 = waterInput[235, 3]
diverted$E236 = waterInput[236, 3] + diverted$E238*nodesInput[63, 3]
diverted$E234 = waterInput[234, 3] +
  diverted$E235*nodesInput[150, 3] +
  diverted$E236*nodesInput[151, 3] +
  diverted$E237*nodesInput[269, 3]
diverted$E231 = waterInput[231, 3] +
  diverted$E234*nodesInput[117, 3] +
  diverted$E232*nodesInput[170, 3] +
  diverted$E233*nodesInput[161, 3]
diverted$E230 = waterInput[230, 3] + diverted$E231*nodesInput[96, 3]
# -----------------------------------------------------------------------
diverted$E269 = waterInput[269, 3]
diverted$E270 = waterInput[270, 3]
diverted$E264 = waterInput[264, 3]
diverted$E265 = waterInput[265, 3]
diverted$E266 = waterInput[266, 3]
diverted$E267 = waterInput[267, 3]
diverted$E249 = waterInput[249, 3]
diverted$E250 = waterInput[250, 3]
diverted$E251 = waterInput[251, 3]
diverted$E243 = waterInput[243, 3]
diverted$E244 = waterInput[244, 3]
diverted$E241 = waterInput[241, 3]
diverted$E254 = waterInput[254, 3]
diverted$E255 = waterInput[255, 3]
diverted$E261 = waterInput[261, 3]
diverted$E259 = waterInput[259, 3]
diverted$E258 = waterInput[258, 3] + diverted$E259*nodesInput[153, 3]
diverted$E257 = waterInput[257, 3] + diverted$E267*nodesInput[64, 3]
diverted$E263 = waterInput[263, 3] + diverted$E270*nodesInput[275, 3]
diverted$E256 = waterInput[256, 3] + diverted$E257*nodesInput[160, 3]
diverted$E262 = waterInput[262, 3] + diverted$E265*nodesInput[272, 3]
diverted$E268 = waterInput[268, 3] + diverted$E269*nodesInput[70, 3]
diverted$E253 = waterInput[253, 3] +
  diverted$E256*nodesInput[156, 3] +
  diverted$E254*nodesInput[270, 3]
diverted$E246 = waterInput[246, 3] +
  diverted$E258*nodesInput[209, 3] +
  diverted$E255*nodesInput[155, 3]
diverted$E252 = waterInput[252, 3] + diverted$E253*nodesInput[125, 3]
diverted$E260 = waterInput[260, 3] +
  diverted$E266*nodesInput[274, 3] +
  diverted$E264*nodesInput[273, 3] +
  diverted$E268*nodesInput[152, 3]
diverted$E245 = waterInput[245, 3] +
  diverted$E261*nodesInput[271, 3] +
  diverted$E262*nodesInput[276, 3] +
  diverted$E263*nodesInput[210, 3]
diverted$E248 = waterInput[248, 3] +
  diverted$E250*nodesInput[301, 3] +
  diverted$E252*nodesInput[91, 3]
diverted$E247 = waterInput[247, 3] +
  diverted$E248*nodesInput[90, 3] +
  diverted$E249*nodesInput[295, 3] +
  diverted$E251*nodesInput[118, 3]
diverted$E242 = waterInput[242, 3] +
  diverted$E241*nodesInput[303, 3] +
  diverted$E245*nodesInput[126, 3] +
  diverted$E246*nodesInput[105, 3] +
  diverted$E260*nodesInput[211,3]
diverted$E240 = waterInput[240, 3] +
  diverted$E242*nodesInput[87, 3] +
  diverted$E243*nodesInput[124, 3] +
  diverted$E244*nodesInput[302, 3]
diverted$E239 = waterInput[239, 3] +
  diverted$E240*nodesInput[46, 3] +
  diverted$E247*nodesInput[46, 3]
# -----------------------------------------------------------------------
# diverted$E271 = 0
# -----------------------------------------------------------------------
diverted$E273 = waterInput[273, 3]
diverted$E272 = waterInput[272, 3] + diverted$E273*nodesInput[82, 3]
# -----------------------------------------------------------------------
diverted$E276 = waterInput[276, 3]
diverted$E277 = waterInput[277, 3]
diverted$E275 = waterInput[275, 3] +
  diverted$E277*nodesInput[80, 3] +
  diverted$E276*nodesInput[127, 3]
diverted$E274 = waterInput[274, 3] +diverted$E275*nodesInput[79, 3]
# -----------------------------------------------------------------------
# converted nodeID = 0 to nodeID = 311
diverted$E279 = waterInput[279, 3]
diverted$E280 = waterInput[280, 3]
diverted$E278 = waterInput[278, 3] +
  diverted$E280*nodesInput[104, 3] +
  diverted$E279*nodesInput[133, 3]
# -----------------------------------------------------------------------
# diverted$E281 = 0
# -----------------------------------------------------------------------
diverted$E283 = waterInput[283, 3]
diverted$E284 = waterInput[284, 3]
diverted$E285 = waterInput[285, 3]
diverted$E286 = waterInput[286, 3]
diverted$E282 = waterInput[282, 3] +
  diverted$E283*nodesInput[304, 3] +
  diverted$E284*nodesInput[305, 3] +
  diverted$E285*nodesInput[306, 3] +
  diverted$E286*nodesInput[169, 3]
# -----------------------------------------------------------------------
diverted$E292 = waterInput[292, 3]
diverted$E293 = waterInput[293, 3]
diverted$E291 = waterInput[291, 3] + diverted$E293*nodesInput[219, 3]
diverted$E290 = waterInput[290, 3]+ diverted$E292*nodesInput[189, 3]
diverted$E289 = waterInput[289, 3] +
  diverted$E290*nodesInput[163, 3] +
  diverted$E291*nodesInput[163, 3]
diverted$E288 = waterInput[288, 3] + diverted$E289*nodesInput[162, 3]
diverted$E287 = waterInput[287, 3] + diverted$E288*nodesInput[157,3]
# -----------------------------------------------------------------------
diverted$E295 = waterInput[295, 3]
diverted$E294 = waterInput[294, 3] + diverted$E295*nodesInput[308, 3]
# -----------------------------------------------------------------------
diverted$E298 = waterInput[298, 3]
diverted$E297 = waterInput[297, 3] + diverted$E298*nodesInput[213, 3]
diverted$E296 = waterInput[296, 3] + diverted$E297*nodesInput[84, 3]
# -----------------------------------------------------------------------
diverted$E300 = waterInput[300, 3]
diverted$E307 = waterInput[307, 3]
diverted$E308 = waterInput[308, 3]
diverted$E304 = waterInput[304, 3]
diverted$E305 = waterInput[305, 3]
diverted$E306 = waterInput[306, 3] +
  diverted$E307*nodesInput[277, 3] +
  diverted$E308*nodesInput[65, 3]
diverted$E303 = waterInput[303, 3] + diverted$E304*nodesInput[145, 3]
diverted$E302 = waterInput[302, 3] + diverted$E303*nodesInput[129, 3]
# NOTE(review): column 2 is used here while every other lookup uses
# column 3 — confirm this is intentional (kept unchanged).
diverted$E301 = waterInput[301, 3] + diverted$E302*nodesInput[99, 2]
diverted$E299 = waterInput[299, 3] +
  diverted$E300*nodesInput[307,3] +
  diverted$E301*nodesInput[85, 3] +
  diverted$E305*nodesInput[76,3]
# -------------------------------------------------------------------
# Save output in dataframe
output.df = as.data.frame(diverted)
# scenario = data.frame(sapply(ls(pattern="^diverted$E[0-9]+$"),get))
# NOTE: spsk.df is built but never returned; kept for debugging only.
spsk.df = as.data.frame(spsk)
return(output.df)
}
|
e58738f8b7c83e0508674fdbd93c856666936a35 | 51ef9fa1b2212c659152fac242bda47e7bf15d6a | /man/dot-tables_base_url.Rd | 39b9b3683da679056589c2e5df6f72b28910ec5d | [] | no_license | cran/rnbp | dc5771835c012e6872cc1a7128ad017234db0dad | 7ccc244007541379fc729d5ab869bd329ef06280 | refs/heads/master | 2021-06-25T03:25:57.463674 | 2021-06-07T06:30:02 | 2021-06-07T06:30:02 | 199,285,520 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 290 | rd | dot-tables_base_url.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/endpoint_tables.R
\name{.tables_base_url}
\alias{.tables_base_url}
\title{Returns the base url for the tables endpoint.}
\usage{
.tables_base_url()
}
\description{
Returns the base url for the tables endpoint.
}
|
f8c6eac09effdb7677960f95d3b0f44b5e3c6c59 | f769ea22071c2b7760e23e387f2418afcf5d3f3c | /testing_hierarchical_clusteringV5.R | ffac10cfd876a476d842eda57c7b2994dbc9a5ed | [] | no_license | MAValle/DATA_ClusterAgr_ISING | 7596b7f13b384c1f93ede2d2d554dda944d9ee7d | 2a15c50936d9a08786a95244a0aef129bdfac914 | refs/heads/master | 2020-06-01T05:06:02.790282 | 2020-02-06T03:18:00 | 2020-02-06T03:18:00 | 190,647,945 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,074 | r | testing_hierarchical_clusteringV5.R | # testing the algorithm for hierarchical agglomeration clustering using simple linkage.
# En esta version 5, probamos las funciones de functions_hclust.R para hacer
# HAC sobre la matriz de distancias del MST de los acoples para el paper y verificamos
# que el resultado del dendograma sea equivalente al del MST. Es decir, deseamos
# verificar lo indicado en pag. 237 (ver tambien pag. 241 de los apuntes.)
# El esquema de operacion es similar a lo hecho en testing_hierarchical_clusteringV4.R.
# actual name: testing_hierarchical_clusteringV5.R
# 28.ago.19
# # # # # # # # # # # # # # # # # # # # DATA LOADING # # # # # # # # # # # # # # # # # # # # #
# Carga de datos,
# igual que en clusteragrr_v1.R
rm(list = ls())
load('new_inferring_parameters_environment270219.RData') # load H and J parameters ising inferred
source("functions_hclust.R")
source("find_mst_barrier_function.R")
source("acople_distance_sum_function.R")
source("is_integer0_function.R")
library(igraph)
# # # # # # # # # # # # # # # # # # # # DATA LOADING # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # DATA LOADING # # # # # # # # # # # # # # # # # # # # #
# de los 25 nodos de J, seleccionemos los primeros 6
#set.seed(123)
# seleccionemos 5 nodos cualquiera
nd <- c(4,8,12,17,19,22) # 01oct19
nd <- c(2,4,6,8,12,15,17,19, 22) # 09oct19
nd <- c(1,4,11,13,20,25) # 10oct19
nd <- c( 1, 3, 7, 11, 13, 14, 15, 17, 25) # 11oct19a
nd <- c(1, 2, 6, 7, 8, 10, 11, 13, 14, 19, 20, 21) # 11oct19b
nd <- 1:25 # 11oct19c
J <- J[nd, nd]
# # # # # # # # # # # # # # # # # # # # DATA LOADING # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # FIND MST # # # # # # # # # # # # # # # # # # # # # #
# Conforming the COUPLING Network
#colnames(J) <- colnames(wb)
#rownames(J) <- colnames(wb)
# Vamos a transformar los nombres de los nodos originales de una vez en nombres para el dendograma
colnames(J) <- rownames(J) <- -c(1:ncol(J)) # ojo -c(1:Nn) tiene el mismo orden que colnames(dis)
#rownames(h) <- c(1:dim(J)[1])
#http://www.shizukalab.com/toolkits/sna/plotting-networks-pt-2
net <- graph_from_adjacency_matrix(as.matrix(J), mode="upper", weighted=TRUE, diag=FALSE)
#E(net)$coupling <- E(net)$weight
# Conforming the MST Network
dis <- sqrt(-J + 3)
# Convertir la matriz de distancia en objeto igraph
g <- graph_from_adjacency_matrix(dis, mode = "undirected", weighted = TRUE, diag = FALSE, add.colnames = NULL, add.rownames = NA)
E(g)$coupling <- E(net)$weight # asignamos las energias de acoples de net a E(g)$coupling
mst_g <- minimum.spanning.tree(g, algorithm="prim")
edg <- as_edgelist(mst_g, names = TRUE)
# # # # # # # # # # # # # # # # # # # # # FIND MST # # # # # # # # # # # # # # # # # # # # # #
D <- dis
Nn <- ncol(D)
diag(D) <- rep(Inf, Nn)
colnames(D) <- rownames(D) <- -c(1:Nn) # ojo -c(1:Nn) tiene el mismo orden que colnames(dis)
# the magic
merge <- hierarchical_clustering(D)
hc <- to_dendo(D, merge, enames = colnames(dis) )
plot(hc)
plot(merge$cluster, merge$dultr, type = "S", pch = 19,
col = "red", xlab = "Cluster Number", ylab = "Ultrametric Distance")
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# 22 sept:
# a merge, necesito agregar dos columnas: la energia Emst asociada a cluster, y
# la energia de acoples Ec asociada a cada cluster.
# the magic --- viene de la L52
merge <- hierarchical_clustering_v2(D)
hc <- to_dendo(D, merge[,c(1:4)], enames = colnames(dis) )
plot(hc)
plot(merge$cluster, merge$dultr, type = "S", pch = 19,
col = "red", xlab = "Cluster Number", ylab = "Ultrametric Distance")
plot(merge$cluster, merge$dmst, type = "S", pch = 19,
col = "red", xlab = "Cluster Number", ylab = "dmst")
plot(merge$cluster, merge$dc, type = "S", pch = 19,
col = "red", xlab = "Cluster Number", ylab = "dc")
plot(merge$dmst, merge$dc, pch=19, xlab="dmst", ylab="dc")
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
##### SOLUCIONADO!!!!
# vemos que tenemos un problema con hc$order.........
# vienen de testing_hierarchical_clusteringV3.R
# # # # # # # # # # # # # #
# para genera rl hc$order, necesitamos identificar de merge[,c(1,2)] todos los
# nodos negativos y ordenarlos en un vector en su orden de aparicion.
temp <- (merge[,c(1,2)])
temp <- as.numeric(rbind(temp$node1, temp$node2))
temp <- temp[temp < 0]
temp <- -1*temp
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
dd <- as.dendrogram(hc)
temp2 <- order.dendrogram(dd)
a <- list() # initialize empty object
# define merging pattern:
# negative numbers are leaves,
# positive are merged clusters (defined by row number in $merge)
a$merge <- as.matrix(merge[,c(1,2)])
a$height <- as.vector(merge$dultr) # define merge heights
a$order <- temp2
#hc$labels <- LETTERS[1:7] # labels of leaves -----#poner nombres originales
a$labels <- c(1:ncol(D))
class(a) <- "hclust" # make it an hclust object
plot(a)
|
da1ee04a916086e63f329c0188a6cdba2f907e3e | 36d289cc65986a3d110deb08894d024ac68b5332 | /final code files/talkingdata_stacking_2Level_Lev1_lgb_xgb_rf.R | ae448850804bd493ab65ce329723220f0134e73a | [] | no_license | abhinavnew/TalkingDataCodeFiles | c614c05d4efdf7276f58aba1a157aeab836c0b82 | f71ca8ddc430c0201d5e495d4b523f5394901f79 | refs/heads/master | 2020-04-16T12:32:58.142902 | 2019-04-30T13:16:31 | 2019-04-30T13:16:31 | 165,584,142 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,912 | r | talkingdata_stacking_2Level_Lev1_lgb_xgb_rf.R |
Overalltime=Sys.time()
library(caret)
library(tidyr)
library(plyr)
library(dplyr)
library(caTools)
library(reshape2)
library(gbm)
library(caTools)
library(randomForest)
library(ggplot2)
library(data.table)
library(xgboost)
library(randomForest)
library(Matrix)
library(mlbench)
library(Boruta)
library(MLmetrics)
library(class)
library(sqldf)
library(scales)
library(TeachingDemos)
library(neuralnet)
library(lightgbm)
## This stacked model has 2 levels -level 1 -xgboost and rf/boruta and LIGHTGBM ,level 2-gbm/caret
#scientific notation off for the session
options(scipen = 999)
rm(list = ls());gc()
txtStart("capturecode.txt")
## Read given files
app_event=fread("E:\\AbhinavB\\Kaggle\\TalkingData\\app_events.csv",data.table = FALSE,colClasses = c("character","character","character","character"))
app_labels=fread("E:\\AbhinavB\\Kaggle\\TalkingData\\app_labels.csv",data.table=FALSE,colClasses = c("character","integer"))
events=fread("E:\\AbhinavB\\Kaggle\\TalkingData\\events.csv",data.table=FALSE,header = TRUE,colClasses = c("character","character","character","numeric","numeric"))
label_categories=fread("E:\\AbhinavB\\Kaggle\\TalkingData\\label_categories.csv",data.table=FALSE,colClasses = c("integer","factor"),stringsAsFactors = TRUE)
ph_bd_dev_model=read.csv("E:\\AbhinavB\\Kaggle\\TalkingData\\phone_brand_device_model.csv",colClasses = c("character","factor","factor"))
tdtrain=fread("E:\\AbhinavB\\Kaggle\\TalkingData\\gender_age_train.csv",data.table=FALSE,colClasses = c("character","character","integer","character"))
tdtest=fread("E:\\AbhinavB\\Kaggle\\TalkingData\\gender_age_test.csv",data.table=FALSE,colClasses = "character")
gc();
colnames(ph_bd_dev_model)[colSums(is.na(ph_bd_dev_model))>0]
colnames(events)[colSums(is.na(events))>0]
colnames(app_event)[colSums(is.na(app_event))>0]
colnames(app_labels)[colSums(is.na(app_labels))>0]
colnames(label_categories)[colSums(is.na(label_categories))>0]
colnames(tdtrain)[colSums(is.na(tdtrain))>0]
colnames(tdtest)[colSums(is.na(tdtest))>0]
##remove full row duplicates from ph brand model device data
ph_bd_dev_model=ph_bd_dev_model[!duplicated(ph_bd_dev_model),]
## converting factor brand and model to numeric values and re converting to factor
ph_bd_dev_model$numbrand=as.numeric(factor(ph_bd_dev_model$phone_brand,levels=levels(ph_bd_dev_model$phone_brand)))
ph_bd_dev_model$nummodel=as.numeric(factor(ph_bd_dev_model$device_model,levels=levels(ph_bd_dev_model$device_model)))
label_categories$numCategories=as.numeric(factor(label_categories$category,levels = levels(label_categories$category)))
##Getting bins for all calls made from any device id
temp_bins=events
temp_bins$timepart=substr(temp_bins$timestamp,11,nchar(temp_bins$timestamp))
temp_bins$timepos=as.POSIXct(temp_bins$timepart,format="%H:%M:%S",tz="UTC")
temp_bins$bins=cut(temp_bins$timepos,breaks = "6 hours",labels = FALSE)
temp_bins$bins=paste0("Bin_",temp_bins$bins,sep="")
temp_bins=temp_bins %>% mutate(i=1)
temp_wide_bins=dcast(temp_bins,device_id ~ bins,value.var="i",fun.aggregate=sum)
rm(temp_bins)
str(ph_bd_dev_model)
str(label_categories)
dim(tdtrain)
dim(ph_bd_dev_model)
##removing duplicates from testset
tdtest1=distinct(tdtest)
##Adding train + test set and preparing a full set
tdtrain$ind="train"
tdtest1$gender=NA
tdtest1$age=NA
tdtest1$group=NA
tdtest1$ind="test"
fullset=rbind(tdtrain,tdtest1)
dim(fullset)
head(fullset)
dim(tdtest1)
dim(tdtrain)
##Merging trainset+testset with phonebrand and phoneModels
TrainWithPh=left_join(fullset,ph_bd_dev_model,by="device_id")
##remove duplicates
TrainWithPh=distinct(TrainWithPh)
colnames(TrainWithPh)
##Merging with Events to get app details
TrainWithEvents=left_join(TrainWithPh,events,by="device_id")
dim(TrainWithEvents)
TrainWithEvents=distinct(TrainWithEvents)
colnames(TrainWithEvents)
##removing objects to free up memory
rm(TrainWithPh)
rm(tdtrain,tdtest,tdtest1,ph_bd_dev_model,events);gc()
#######################################################################################
##Merging with app event
TrainWithAppevents=left_join(TrainWithEvents,app_event,by="event_id")
colnames(TrainWithAppevents)
##removing original datasets to free up memory
rm(TrainWithEvents,app_event);gc()
##remove duplicates
TrainWithAppevents_rel4=distinct(TrainWithAppevents)
##removing phnbrand model text cols alongwith timestamp,lat,long,is_active columns
temp2_rel=TrainWithAppevents_rel4[,-c(6,7,10,11,12,13,15)]
colnames(temp2_rel)
rm(TrainWithAppevents,TrainWithAppevents_rel4);gc()
##removing duplicates now without losing any device id
temp2_rel2=distinct(temp2_rel)
dim(temp2_rel2)
colnames(temp2_rel2)
rm(temp2_rel);gc()
##Now joining reduced set with labels
####MERGING WITH APP_LABELS TO GET CATEGORY OF APP USED
temp3=left_join(temp2_rel2,app_labels,by="app_id");
length(unique(temp3$device_id))
dim(temp3)
colnames(temp3)
rm(temp2_rel2);gc()
##find unique rows based on all columns using dplyr otherwise fails with duplicated
temp4=distinct(temp3)
length(unique(temp4$device_id))
rm(temp3);gc()
colnames(temp4)
## NOW joining to get label_categories (master table) in numerical form
temp5=left_join(temp4,label_categories,by="label_id")
length(unique(temp5$device_id))
colnames(temp5)
rm(app_event,app_labels,events,label_categories,ph_bd_dev_model,tdtest,tdtest1,tdtrain);gc()
## how to remove unnecc colums without losing any unique device id
## remove label_id and category text field
temp5_rel=temp5[,-c(10,11)]
dim(temp5_rel)
head(temp5_rel)
length(unique(temp5_rel$device_id))
colnames(temp5)
full_activeset=temp5_rel
rm(temp4,temp5,temp5_rel);gc()
## add code to update NA where Is_active =0
full_activeset$numCategories <-ifelse(full_activeset$is_active==0,"NA",full_activeset$numCategories)
##remove duplicates without losing any device id
full_activeset1=distinct(full_activeset)
colnames(full_activeset1)
colSums(is.na(full_activeset1))
full_activeset2=full_activeset1[,-c(8,9)]
full_activeset3=distinct(full_activeset2)
full_activeset3$numCategories=paste("AppCat",full_activeset3$numCategories,sep="_")
rm(fullset,full_activeset,full_activeset1,full_activeset2);gc()
head(full_activeset3)
dim(full_activeset3)
colnames(full_activeset3)
colSums(is.na(full_activeset3))
##new::: merging with device id and their timebin counts
full_activeset4=left_join(full_activeset3,temp_wide_bins,by="device_id")
### making categories wide so that there is one row per device id
wide=full_activeset4 %>%mutate(i=1)
full_wide=dcast(wide,device_id+gender+age+group+ind+numbrand+nummodel+Bin_1+Bin_2+Bin_3+Bin_4 ~ numCategories,value.var = "i",fun.aggregate = sum)
dim(full_wide)
txtStop()
##LEVEL1 of stacking model
##TRAINSET prep common for First level of STACKING MODELS-->rf/boruta ;xgboost;LIGHTGBM
##preparing train and validate set
trainset=subset(full_wide,full_wide$ind=="train")
## remove duplicate based on just one field ie device id ,doesn't matter which row is removed
trainset=trainset[!duplicated(trainset$device_id),]
##removing cols including device id which is unique and not useful for modelling
trainsetfull=trainset[,-c(1,2,3,5)]
##all categorical variables are of character datatype so far
##Breaking trainsetfull into a train and validation set so that stacking can be built
splitt=sample.split(trainsetfull$group,SplitRatio = 0.6)
trainset_splitt=subset(trainsetfull,splitt==TRUE)
validate_splitt=subset(trainsetfull,splitt==FALSE)
##TRAIN and VALIDATE set prep for all 3
trainset_rf=trainset_splitt
trainset_xg=trainset_splitt
trainset_lgbm=trainset_splitt
validateset_rf=validate_splitt
validateset_xg=validate_splitt
validateset_lgbm=validate_splitt
#Converting dependent variable to corresponding factor numeric value and storing as numeric and then converting to factor again
trainset_rf$group=as.factor(make.names(trainset_rf$group))
str(trainset)
small_trainset=trainset_rf[1:5000,]
small_trainset[is.na(small_trainset)]<--999
##TESTSET prep common for all 3 first level STACKING models
testset=subset(full_wide,full_wide$ind=="test")
colnames(testset)
dim(testset)
#remove duplicates should be 112071 rows
testset=testset[!duplicated(testset$device_id),]
dim(testset)
test_dev_id=testset$device_id
length(test_dev_id)
head(test_dev_id)
##removing cols including device id which is unique and not useful for modelling
testset=testset[,-c(1,2,3,5)]
## remove duplicates (from testset) based on just one field ie device id ,doesn't matter which row is removed
colnames(testset)
dim(testset)
##Test set prep For boruta+rf model , xgboost and lgbm
testset_rf=testset
testset_xg=testset
testset_lgbm=testset
testset_rf$group=as.factor(testset_rf$group)
rm(full_activeset3,wide)
########LEVEL-1 modelling ###################################################
##Fitting Random Forest model and running with caret/train
set.seed(114)
##Adding variable importance part
##boruta_train=Boruta(group ~.,data=small_trainset,doTrace=2)
##boruta_bank=TentativeRoughFix(boruta_train)
##keepimp=getSelectedAttributes(boruta_bank,withTentative = F)
##write.csv(as.data.frame(keepimp),"E:\\AbhinavB\\Kaggle\\TalkingData\\SubmissionFiles\\boruta_Variables.csv")
keepimp=read.csv("E:\\AbhinavB\\Kaggle\\TalkingData\\SubmissionFiles\\boruta_Variables.csv")
keepimp=keepimp[,-c(1)]
length(keepimp)
final_trainset_rf=trainset_rf[,names(trainset_rf) %in% keepimp]
dim(final_trainset_rf)
##paramter list for random F
trctrlobj=trainControl(method="cv",verboseIter=TRUE,classProbs = TRUE,summaryFunction = mnLogLoss)
#tgrid=expand.grid(.mtry=c(1:5))
rfmod=train(group ~.,
data=final_trainset_rf,
method="rf",
distribution="multinomial",
metric="logLoss",
trControl=trctrlobj,
allowParallel=T,
verbose=TRUE
)
out1=capture.output(rfmod)
cat("SummaryOfRFModel",out1,file="E:\\AbhinavB\\Kaggle\\TalkingData\\SubmissionFiles\\RfmodelDetails.csv",sep="/n",append = T)
##making predictions on validate set (known data ) and checking accuracy mlogloss
dim(validateset_rf)
validateset_result=subset(validateset_rf,select=c("group"))
remove_col=c("group")
validateset_rf=validateset_rf[,!names(validateset_rf) %in% remove_col]
dim(validateset_rf)
pred_val_rf=predict(rfmod,newdata = validateset_rf,type = "prob")
#making training set for 2nd level of model stack
train_level_2=data.frame(pred_val_rf,validateset_result)
dim(train_level_2)
typeof(train_level_2)
##making predictions on unseen data
pred_rf=predict(rfmod,newdata=testset_rf,type="prob")
dim(pred_rf)
res_submit_rf=as.data.frame(pred_rf)
dim(res_submit_rf)
test_level_2=data.frame(res_submit_rf)
dim(test_level_2)
###############################################################
##prep for LightGBM
##trainset for lightgbm
dim(trainset_lgbm)
trainset_lgbm=trainset_lgbm[,-which(names(trainset_lgbm) %in% c("Bin_1","Bin_2","Bin_3","Bin_4","AppCat_NA"))]
dim(trainset_lgbm)
trainset_lgbm$group=as.factor(trainset_lgbm$group)
trainset_lgbm$group=as.numeric(factor(trainset_lgbm$group),levels=levels(trainset_lgbm$group))-1
str(trainset_lgbm)
tr_labels=trainset_lgbm[,"group"]
train_m_lgbm=trainset_lgbm[,-c(1)]
lgbtrain=lgb.Dataset(data =as.matrix(train_m_lgbm),label=tr_labels)
##validate set for lightgbm
dim(validateset_lgbm)
colnames(validateset_lgbm)
validateset_lgbm=validateset_lgbm[,-which(names(validateset_lgbm) %in% c("Bin_1","Bin_2","Bin_3","Bin_4","AppCat_NA"))]
dim(validateset_lgbm)
validateset_lgbm$group=as.factor(validateset_lgbm$group)
validateset_lgbm$group=as.numeric(factor(validateset_lgbm$group),levels=levels(validateset_lgbm$group))-1
str(validateset_lgbm)
val_lgbm_labels=validateset_lgbm[,"group"]
val_lgbm_m=validateset_lgbm[,-c(1)]
sparse_val_m=as.matrix(val_lgbm_m)
##Testset prep for lightGBM ::::
dim(testset_lgbm)
testset_lgbm=testset_lgbm[,-which(names(testset_lgbm) %in% c("Bin_1","Bin_2","Bin_3","Bin_4","AppCat_NA"))]
dim(testset_lgbm)
test_dev_id_lgbm=testset_lgbm$device_id
length(test_dev_id_lgbm)
head(test_dev_id_lgbm)
##removing cols including device id which is unique and not useful for modelling
##testset=testset[,-c(1,2,3,5)]
## remove duplicates (from testset) based on just one field ie device id ,doesn't matter which row is removed
#testset=testset[!duplicated(testset$device_id),]
colnames(testset_lgbm)
dim(testset_lgbm)
testset_lgbm$group=as.factor(testset_lgbm$group)
testset_lgbm$group=as.numeric(factor(testset_lgbm$group),levels=levels(testset_lgbm$group))-1
ts_lg_labels=testset_lgbm[,"group"]
test_lgbm_m=testset_lgbm[,-c(1)]
sparse_test_m=as.matrix(test_lgbm_m)
##Preparing LightGBM based model and running with lgb.train method
set.seed(114)
param=list(boosting_type="gbdt",
objective="multiclass",
metric="multi_logloss",
learning_rate=0.003,
max_depth=10,
num_leaves=5,
feature_fraction=0.7
,num_class=12
)
set.seed(114)
fitlgbcv=lgb.cv(params = param,data=lgbtrain,nfold=10,nrounds=500,early_stopping_rounds=50)
best_i=fitlgbcv$best_iter
lgbmodel1=lgb.train(params = param,data=lgbtrain,
nrounds = best_i,num_class=12)
#making predictions on validate set -LGBM model
pred_val_lgbm=predict(lgbmodel1,sparse_val_m)
#making 2nd level of train-LGBM model
pred_val_lgbm_matrix=matrix(pred_val_lgbm,ncol =12)
pred_val_lgbm_df=as.data.frame(pred_val_lgbm_matrix)
train_level_3=cbind(train_level_2,pred_val_lgbm_df)
train_level_3$group=make.names(train_level_3$group)
dim(train_level_2)
typeof(train_level_2)
dim(train_level_3)
##making predictions on unseen data-LGBM model
pred_test_lgbm=predict(lgbmodel1,sparse_test_m)
length(pred_test_lgbm)
#making 2nd level of TEST-LGBM model
pred_lgbm_mat=matrix(pred_test_lgbm,ncol = 12)
pred_lgbm_df=as.data.frame(pred_lgbm_mat)
test_level_3=cbind(test_level_2,pred_lgbm_df)
dim(test_level_2)
typeof(test_level_2)
dim(test_level_3)
typeof(test_level_3)
##prep for xgboost
#trainset for xgboost
trainset_xg$group=as.factor(trainset_xg$group)
trainset_xg$group=as.numeric(factor(trainset_xg$group),levels=levels(trainset_xg$group))-1
#trainset$group=as.factor(trainset$group)
str(trainset)
tr_labels=trainset_xg[,"group"]
length(tr_labels)
prev_action=options('na.action')
options(na.action = 'na.pass')
train_m=sparse.model.matrix(group ~ .-1,data=trainset_xg)
dim(train_m)
dtrain=xgb.DMatrix(data=as.matrix(train_m),label=tr_labels,missing = NA)
#validateset for xgboost
validateset_xg$group=as.factor(validateset_xg$group)
validateset_xg$group=as.numeric(factor(validateset_xg$group),levels=levels(validateset_xg$group))-1
str(validateset_xg)
vr_labels=validateset_xg[,"group"]
validate_m=sparse.model.matrix(group ~ .-1,data=validateset_xg)
dim(validate_m)
dvalidate=xgb.DMatrix(data=as.matrix(validate_m),label=vr_labels,missing = NA)
##Testset prep for xgboost ::::
testset_xg$group=as.factor(testset_xg$group)
testset_xg$group=as.numeric(factor(testset_xg$group),levels=levels(testset_xg$group))-1
#testset$group=as.factor(testset$group)
ts_labels=testset_xg[,"group"]
test_m=testset_xg[,-c(1)]
dim(test_m)
dtest=xgb.DMatrix(data=as.matrix(test_m),label=ts_labels,missing = NA)
##write.csv(dtest,"E:\\AbhinavB\\Kaggle\\TalkingData\\SubmissionFiles\\dtest.csv",quote = F)
options(na.action = prev_action$na.action)
rm(full_activeset3,wide)
##Preparing XGBoost model and running with caret/train
set.seed(114)
##paramter list for xgboost
nc=length(unique(tr_labels))
param=list(booster="gblinear",
num_class=nc,
objective="multi:softprob",
eval_metric="mlogloss",
eta=0.01,
lambda=5,
lambda_bias=0,
alpha=2)
watch=list(train=dtrain)
#ntree=280
set.seed(114)
##xgboost model
xgb_mod1=xgb.train(params = param,
data=dtrain,
watchlist = watch,
nrounds = 280,
verbose = 1)
#making predictions on validate set
pred_val_xg=predict(xgb_mod1,newdata = dvalidate)
length(pred_val_xg)
#making 2nd level of train
pred_val_xg_matrix=matrix(pred_val_xg,ncol =12)
pred_val_xg_df=as.data.frame(pred_val_xg_matrix)
colnames(pred_val_xg_df)=paste(colnames(pred_val_xg_df),"xgparam",sep = "_")
train_level_4=cbind(train_level_3,pred_val_xg_df)
train_level_4$group=make.names(train_level_4$group)
dim(train_level_4)
##making predictions on unseen data
pred_xg=predict(xgb_mod1,newdata=dtest)
length(pred_xg)
#making 2nd level of TEST
pred_xg_mat=matrix(pred_xg,ncol = 12)
pred_xg_df=as.data.frame(pred_xg_mat)
colnames(pred_xg_df)=paste(colnames(pred_xg_df),"xgparam",sep = "_")
test_level_4=cbind(test_level_3,pred_xg_df)
dim(test_level_4)
typeof(test_level_4)
##2nd level of MODEL STACK -GBM/caret
# passing prev predictions and test set to 2nd level of gbm model
trctrlobj=trainControl(method="cv",verboseIter=FALSE,classProbs = TRUE,summaryFunction = mnLogLoss)
gbmmod1=train(group ~.,
data=train_level_4,
method="gbm",
distribution="multinomial",
metric="logLoss",
trControl=trctrlobj,
verbose=FALSE
)
pred_final_gbm=predict(gbmmod1,newdata=test_level_4,type='prob')
res_submit=cbind(test_dev_id,as.data.frame(pred_final_gbm))
##colnames(res_submit)=c("device_id","F23-","F24-26","F27-28","F29-32","F33-42","F43+","M22-","M23-26","M27-28","M29-31","M32-38","M39+")
##write.csv(res_submit,"E:\\AbhinavB\\Kaggle\\TalkingData\\SubmissionFiles\\Submit3.csv",row.names = F,quote = F)
##endtime=proc.time()
timetakens=starttime-endtime
cat(timetakens)
colnames(res_submit)=c("device_id","F23-","F24-26","F27-28","F29-32","F33-42","F43+","M22-","M23-26","M27-28","M29-31","M32-38","M39+")
write.csv(res_submit,"E:\\AbhinavB\\Kaggle\\TalkingData\\SubmissionFiles\\Submit17A_dup_2Levelstack_Withlgbm.csv",row.names = F,quote = F)
Overallendtime=Sys.time()-Overalltime
print(Overallendtime)
|
5fc4210bb3e130e9fd45df6cf744bcce9bee0d7f | 65dce4f93d5fbdbc81d20470d17f4ce2e440f29a | /exercises/topic2-regression.r | e5eadbd392081b393d14647ddb8c29595fdfbc66 | [] | no_license | tertiarycourses/FullRMachineLearning | 37a1ee1e5c8ce0c893ac3f3dfa0eae642a097658 | 6fea62db4b8367481bc28993473eae6f283774a6 | refs/heads/master | 2021-06-28T18:08:37.032920 | 2020-12-20T03:18:03 | 2020-12-20T03:18:03 | 190,512,174 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,162 | r | topic2-regression.r | # Topic 2 Regression
#install.packages("mlr")
#install.packages("glmnet")
#install.packages("ggplot2")
#install.packages("cowplot")
#install.packages("MASS")
#install.packages("dplyr")
library(mlr)
library(glmnet)
library(ggplot2)
library(cowplot)
library(MASS)
library(dplyr)
theme_set(theme_bw())
# Linear Regresson Demo
# Data
mouse.data <- data.frame(
weight=c(0.9, 1.8, 2.4, 3.5, 3.9, 4.4, 5.1, 5.6, 6.3),
size=c(1.4, 2.6, 1.0, 3.7, 5.5, 3.2, 3.0, 4.9, 6.3))
mouse.data
plot(mouse.data$weight, mouse.data$size)
## create a "linear model" - that is, do the regression
mouse.regression <- lm(size ~ weight, data=mouse.data)
## generate a summary of the regression
summary(mouse.regression)
## add the regression line to our x/y scatter plot
abline(mouse.regression, col="blue")
# Ex: Linear Regresssoin
data(quakes)
plot(quakes$stations, quakes$mag)
quake.regression = lm (mag~stations, data=quakes)
summary(quake.regression)
abline(quake.regression, col="blue")
# Multiple Regression Demo
# Data
mouse.data <- data.frame(
size = c(1.4, 2.6, 1.0, 3.7, 5.5, 3.2, 3.0, 4.9, 6.3),
weight = c(0.9, 1.8, 2.4, 3.5, 3.9, 4.4, 5.1, 5.6, 6.3),
tail = c(0.7, 1.3, 0.7, 2.0, 3.6, 3.0, 2.9, 3.9, 4.0))
mouse.data
plot(mouse.data)
# create a "multi regression model"
multiple.regression <- lm(size ~ weight + tail, data=mouse.data)
# generate a summary of the regression
summary(multiple.regression)
# Ex: Multi Regression Demo
data(mtcars)
mtcars.regressiion <-lm(mpg~hp+wt+disp,data=mtcars)
predict(mtcars.regressiion,data.frame(disp=160,hp=110,wt=2.6))
# Ridge Regularization
set.seed(123)
# Center y, X will be standardized in the modelling function
y <- mtcars %>% select(mpg) %>% scale(center = TRUE, scale = FALSE) %>% as.matrix()
X <- mtcars %>% select(-mpg) %>% as.matrix()
# Perform 10-fold cross-validation to select lambda
lambdas_to_try <- 10^seq(-3, 5, length.out = 100)
# Setting alpha = 0 implements ridge regression
ridge_cv <- cv.glmnet(X, y, alpha = 0, lambda = lambdas_to_try,standardize = TRUE, nfolds = 10)
# Plot cross-validation results
plot(ridge_cv)
# Best cross-validated lambda
lambda_cv <- ridge_cv$lambda.min
lambda_cv
# Prediction
model_cv <- glmnet(X, y, alpha = 0, lambda = lambda_cv, standardize = TRUE)
y_hat_cv <- predict(model_cv, X)
# Sum of Squares Total and Error
sst <- sum((y - mean(y))^2)
sse <- sum((y_hat_cv - y)^2)
# R squared
rsq <- 1 - sse / sst
rsq
# Lasso Regularization
# Perform 10-fold cross-validation to select lambda
lambdas_to_try <- 10^seq(-3, 5, length.out = 100)
# Setting alpha = 1 implements lasso regression
lasso_cv <- cv.glmnet(X, y, alpha = 1, lambda = lambdas_to_try, standardize = TRUE, nfolds = 10)
# Plot cross-validation results
plot(lasso_cv)
# Best cross-validated lambda
lambda_cv <- lasso_cv$lambda.min
lambda_cv
# Prediction
model_cv <- glmnet(X, y, alpha = 1, lambda = lambda_cv, standardize = TRUE)
y_hat_cv <- predict(model_cv, X)
# Sum of Squares Total and Error
sst <- sum((y - mean(y))^2)
sse <- sum((y_hat_cv - y)^2)
# R squared
rsq <- 1 - sse / sst
rsq
# See how increasing lambda shrinks the coefficients --------------------------
# Each line shows coefficients for one variables, for different lambdas.
# The higher the lambda, the more the coefficients are shrinked towards zero.
res <- glmnet(X, y, alpha = 1, lambda = lambdas_to_try, standardize = FALSE)
plot(res, xvar = "lambda")
legend("bottomright", lwd = 1, col = 1:6, legend = colnames(X), cex = .7)
# Ex: Elastic Net Regularizaton
# Perform 10-fold cross-validation to select lambda
lambdas_to_try <- 10^seq(-3, 5, length.out = 100)
# Setting alpha = 1 implements lasso regression
elasticnet_cv <- cv.glmnet(X, y, alpha = 0.2, lambda = lambdas_to_try, standardize = TRUE, nfolds = 10)
# Plot cross-validation results
plot(elasticnet_cv)
# Best cross-validated lambda
lambda_cv <- elasticnet_cv$lambda.min
lambda_cv
# Prediction
model_cv <- glmnet(X, y, alpha = 1, lambda = lambda_cv, standardize = TRUE)
y_hat_cv <- predict(model_cv, X)
# Sum of Squares Total and Error
sst <- sum((y - mean(y))^2)
sse <- sum((y_hat_cv - y)^2)
# R squared
rsq <- 1 - sse / sst
rsq
|
4a1a43699d2b7f3e9faf4f7641d4aaa087e5809f | 1a4b653701ea2cbee79bf52e8e4b1bdb4b6d9d45 | /CVS case/old_0404/CVS_3_nonsmk_match_reg.r | 3e5913f409e1f1f1bc72b3fde475aa5069d2d4fb | [] | no_license | Superet/Tobacco | c5fbab709d2ea7be74be2f0cc7466d279ebcd551 | 7fe51f480cd3d0db812987c3cf73c7086978980b | refs/heads/master | 2021-05-04T10:29:17.916670 | 2017-07-15T19:13:54 | 2017-07-15T19:13:54 | 44,255,630 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 22,880 | r | CVS_3_nonsmk_match_reg.r | library(reshape2)
library(ggplot2)
library(data.table)
library(lubridate)
library(stargazer)
library(zipcode)
library(plm)
library(lme4)
library(xlsx)
library(MatchIt)
library(lmtest)
library(Matching)
# setwd("~/Documents/Research/Tobacco/processed_data")
# plot.wd <- "~/Desktop"
setwd("U:/Users/ccv103/Documents/Research/tobacco/processed_data")
# setwd("/sscc/home/c/ccv103/Tobacco")
plot.wd <- getwd()
out.file <- "cvs_nonsmk_match_agg"
ww <- 6.5
ar <- .6
sink(paste("log_", out.file, ".txt", sep=""), append = FALSE)
panelists <- read.csv("tob_CVS_pan.csv", header = T)
purchases <- read.csv("tob_CVS_purchases.csv", header = T)
trips <- read.csv("tob_CVS_trips.csv", header = T)
nsmk.pan <- read.csv("tob_CVS_nonsmk_pan.csv", header = T)
nsmk.trips <- read.csv("tob_CVS_nonsmk_trips.csv", header = T)
ma.policy <- read.xlsx("MA_policy.xlsx", 1)
panelists$smk <- 1
nsmk.pan$smk <- 0
trips$smk <- 1
nsmk.trips$smk <- 0
panelists <- rbind(panelists, nsmk.pan)
trips <- rbind(trips, nsmk.trips)
names(panelists) <- tolower(names(panelists))
# # Select a random sample
# sel <- sample(unique(panelists$household_code), .1*length(unique(panelists$household_code)))
# panelists <- subset(panelists, household_code %in% sel)
# trips <- subset(trips, household_code %in% sel)
# purchases <- subset(purchases, household_code %in% sel )
############
# Function #
############
Cls.se.fn <- function(model, cluster.vec, return.se = TRUE){
# Var(beta) = (X'X)^(-1) [(eps*X)'(eps*X)](X'X)^(-1)
X <- model.matrix(model)
uj <- residuals(model) * X
uj <- apply(uj, 2, function(x) tapply(x, cluster.vec, sum))
A <- solve(crossprod(X))
cls.vcov <- A %*% crossprod(uj) %*% A
if(return.se){
return(sqrt(diag(cls.vcov)))
}else{
return(cls.vcov)
}
}
#################
# Organize data #
#################
# Add week
event.date <- as.Date("2014-09-01", format = "%Y-%m-%d") # Placebo event
event.month <- month(event.date) + 12
cvs.ret <- 4914 # retailer_code for CVS
qunit <- 20 # 20 cigaretts per pack
firstw <- as.Date("2012-12-31", format = "%Y-%m-%d") # The first week in 2013
purchases$purchase_date <- as.Date(as.character(purchases$purchase_date), format = "%Y-%m-%d")
purchases$week <- ((as.numeric(purchases$purchase_date - firstw)) %/%7 + 1)* 7 + firstw - 1
purchases$year <- year(purchases$purchase_date)
purchases$month <- month(purchases$purchase_date)
purchases$month <- ifelse(purchases$year == 2012, 1, ifelse(purchases$year == 2013, purchases$month, purchases$month + 12))
trips$purchase_date <- as.Date(as.character(trips$purchase_date), format = "%Y-%m-%d")
trips$week <- ((as.numeric(trips$purchase_date - firstw)) %/%7 + 1)* 7 + firstw - 1
trips$year <- year(trips$purchase_date)
trips$month <- month(trips$purchase_date)
trips$month <- ifelse(trips$year == 2012, 1, ifelse(trips$year == 2013, trips$month, trips$month + 12))
endweek <- c(min(purchases$week), max(purchases$week))
# Mark CVS
trips$cvs <- ifelse(trips$retailer_code == cvs.ret, 1, 0)
purchases <- merge(purchases, trips[,c("trip_code_uc", "cvs", "channel_type")], by = "trip_code_uc", all.x=T)
# Mark the places that already implement tobacco ban
ma.policy$countynm <- paste(toupper(substring(ma.policy$COUNTY, 1, 1)), tolower(substring(ma.policy$COUNTY, 2)), sep = "")
sort(unique(panelists[panelists$statecode == "MA","countynm"]))
cnty <- c("Berkeley","Daly City","Healdsburg","Hollister","Marin","Richmond","San Francisco","Santa Clara", "Sonoma" )
panelists$ban_ard <- with(panelists, 1*((statecode=="MA" & countynm %in% ma.policy$countynm)| (statecode=="CA" & countynm %in% cnty)))
sort(unique(panelists[panelists$ban_ard==1,"countynm"]))
table(panelists$ban_ard)
# Classify households distance to CVS
tmp <- data.table(panelists)
tmp <- tmp[,list(nzip = length(unique(panelist_zip_code)), nd = length(unique(distance_cvs))), by = list(household_code)]
summary(tmp)
mean(tmp$nzip>1)
mypan <- panelists[panelists$panel_year == 2014,] # Use 2014 panelist profile
median(mypan$distance_cvs, na.rm = T)
mypan <- subset(mypan, !is.na(distance_cvs))
mypan$cvs_in2 <- ifelse(mypan$distance_cvs <=2, 1, 0)
mypan$wgr_in2 <- ifelse(mypan$distance_walgreens <=2, 1, 0)
# Classify light vs heavy smokers
tmp1 <- data.table(purchases)
setkeyv(tmp1, c("household_code", "purchase_date"))
tmp1 <- tmp1[, list(consum = sum(quantity*size/qunit, na.rm=T)/(as.numeric(max(purchase_date)-min(purchase_date)))*7),
by = list(household_code)]
summary(tmp1$consum)
tmp1$heavy <- 1*(tmp1$consum > 2.5)
tmp <- setNames(tmp1$heavy, tmp1$household_code)
mypan$heavy <- tmp[as.character(mypan$household_code)]
mypan[is.na(mypan$heavy), "heavy"] <- 0
# Distribution of the fraction of cigarette spending conditional on CVS visit
tmp <- data.table(subset(purchases, cvs==1 & purchase_date < event.date))
tmp <- tmp[,list(total_price_paid = sum(total_price_paid - coupon_value)), by = list(trip_code_uc)]
tmp <- merge(trips[trips$cvs==1 & trips$purchase_date < event.date, ], tmp, by = "trip_code_uc", all.x = T)
tmp[is.na(tmp$total_price_paid), "total_price_paid"] <- 0
tmp <- data.table(tmp[,c("household_code", "total_price_paid", "total_spent", "purchase_date")])
tmp <- tmp[,list(cig_frac = sum(total_price_paid)/sum(total_spent),
cig_frac_cond = sum(total_price_paid[total_price_paid>0])/sum(total_spent[total_price_paid>0]) ),
by = list(household_code)]
tmp[is.na(tmp)] <- 0
median(tmp[cig_frac>0,cig_frac])
tmp$frac_seg <- ifelse(tmp$cig_frac ==0, "Zero", ifelse(tmp$cig_frac <= median(tmp[cig_frac>0,cig_frac]), "S1", "S2"))
mypan <- merge(mypan, tmp[,list(household_code, frac_seg)], by = "household_code", all.x=T)
mypan[is.na(mypan$frac_seg), "frac_seg"] <- "Never"
mypan$frac_seg <- factor(mypan$frac_seg, levels = c("Never","Zero", "S1", "S2"))
table(mypan$frac_seg)
# Collapse demographic levels
new.col <- list(setNames(c(2500,6500, 9000, 11000, 13500, 17500, 22500, 27500, 32500, 37500, 42500, 47500, 55000, 65000, 75000, 100000),
c(3, 4, 6, 8, 10, 11, 13, 15, 16, 17, 18, 19, 21, 23, 26, 27)), # Income code # Income code
setNames(c(NA, 23, 27, 32, 37, 42, 47, 52, 60, 65), 0:9), # Age code
setNames(c(rep(1,8), 0), 1:9), # Kids code
setNames(c(rep(c("House","Condo"), 3), "Mobile"), 1:7), # Residence code
setNames(c("White", "African American", "Asian", "Other"), 1:4), # Race code
setNames(c(NA, rep("Employed", 3), "Unemployed",
rep(c("Employed", "Both employed", "Both employed", "Both employed", "Only one employed"), 3),
"Unemployed", "Only one employed", "Only one employed", "Only one employed", "Both unemployed"),
do.call(paste, expand.grid(c(0,1:3,9), c(0, 1:3, 9))) ) )
names(new.col) <- c("household_income", "age", "age_and_presence_of_children", "residence", "race", "employment")
new.col
mypan$income <- new.col[["household_income"]][as.character(mypan$household_income)]
mypan$male_head_age <- new.col[["age"]][as.character(mypan$male_head_age)]
mypan$female_head_age <- new.col[["age"]][as.character(mypan$female_head_age)]
mypan$age <- rowMeans(mypan[,c("female_head_age", "male_head_age")], na.rm=T)
mypan$have_kids <- new.col[["age_and_presence_of_children"]][as.character(mypan$age_and_presence_of_children)]
mypan$employment <- paste(mypan$male_head_employment, mypan$female_head_employment)
mypan$employment <- new.col[["employment"]][as.character(mypan$employment)]
mypan$employment <- factor(mypan$employment, levels = c("Unemployed", "Employed", "Only one employed", "Both employed", "Both unemployed"))
mypan$race <- factor(new.col[["race"]][as.character(mypan$race)], levels = new.col[["race"]])
demo.col <- c("income", "age", "have_kids", "employment", "race")
sel <- sapply(demo.col, function(i) is.numeric(mypan[,i]))
summary(mypan[,demo.col[sel]])
lapply(demo.col[!sel], function(i) table(mypan[,i]))
sel <- apply(mypan[,demo.col], 1, function(x) any(is.na(x)))
cat(sum(sel), "Households have missing demogrpahics.\n")
mypan <- mypan[!sel,]
# For this analysis, we only focus on CVS shoppers.
cat(sum(mypan$frac_seg == "Never"),"households out of ", nrow(mypan), "never shopped at CVS, so drop them for this current analysis.\n")
mypan <- subset(mypan, frac_seg != "Never")
purchases <- subset(purchases, household_code %in% mypan$household_code)
trips <- subset(trips, household_code %in% mypan$household_code)
# -------------------------- #
# Fill in non-puchases months #
# Complete month for each household
tmp <- data.table(trips)
tmp <- tmp[,list(start = min(month), end = max(month)), by = list(household_code)]
tmp <- tmp[, n:= end-start]
tmp1 <- lapply(1:nrow(tmp), function(i) tmp[i,start] + c(0:tmp[i,n]))
names(tmp1) <- tmp$household_code
tmp1 <- melt(tmp1)
names(tmp1) <- c("month", "household_code")
tmp1$household_code <- as.numeric(tmp1$household_code)
# Trips and spending
tmp2 <- data.table(trips)
tmp2 <- tmp2[,list(total_spent = sum(total_spent)), by = list(household_code, month, purchase_date, channel_type, retailer_code, cvs)]
tmp2 <- tmp2[,list( trip_cvs = length(purchase_date[cvs==1]),
trip_othdrug = length(purchase_date[channel_type == "Drug Store" & cvs ==0] ),
trip_othchannel = length(purchase_date[channel_type != "Drug Store"]),
trip_grocery = length(purchase_date[channel_type == "Grocery"]),
trip_discount = length(purchase_date[channel_type == "Discount Store"]),
trip_convenience= length(purchase_date[channel_type == "Convenience Store"]),
trip_service = length(purchase_date[channel_type == "Service Station"]),
trip_gas = length(purchase_date[channel_type == "Gas Mini Mart"]),
dol_cvs = sum(total_spent*cvs, na.rm = T),
dol_othdrug = sum(total_spent*(1-cvs)*1*(channel_type == "Drug Store"), na.rm = T),
dol_othchannel = sum(total_spent*1*(channel_type != "Drug Store"), na.rm = T),
dol_grocery = sum(total_spent*1*(channel_type == "Grocery"), na.rm = T),
dol_discount = sum(total_spent*1*(channel_type == "Discount Store"),na.rm=T),
dol_convenience = sum(total_spent*1*(channel_type == "Convenience Store"),na.rm=T),
dol_service = sum(total_spent*1*(channel_type == "Service Station"), na.rm=T),
dol_gas = sum(total_spent*1*(channel_type == "Gas Mini Mart"),na.rm=T),
dol_total = sum(total_spent)
),
by = list(household_code, month)]
dim(tmp1); dim(tmp2)
sum(is.na(tmp2))
summary(tmp2[,list(trip_cvs, trip_othdrug, trip_othchannel)])
mydata <- merge(tmp1, tmp2, by = c("household_code", "month"), all.x = T)
dim(mydata)
# Cigarette spending
# Actual cigarette purchases
tmp3 <- data.table(purchases)
tmp3 <- tmp3[,list( q = sum(quantity*size/qunit, na.rm=T),
cigdol = sum(total_price_paid - coupon_value, na.rm=T),
cigdol_cvs = sum((total_price_paid - coupon_value)*cvs, na.rm=T),
cigdol_othdrug = sum((total_price_paid - coupon_value)*(1-cvs)*1*(channel_type == "Drug Store"), na.rm=T),
cigdol_othchannel= sum((total_price_paid - coupon_value)*1*(channel_type != "Drug Store"), na.rm=T)),
by = list(household_code, month)]
mydata <- merge(mydata, tmp3, by = c("household_code", "month"), all.x = T)
sel <- is.na(mydata)
mydata[sel] <- 0
mydata$netdol <- with(mydata, dol_total - cigdol)
mydata$netdol_cvs <- with(mydata, dol_cvs - cigdol_cvs)
mydata$netdol_othdrug <- with(mydata, dol_othdrug - cigdol_othdrug)
mydata$netdol_othchannel<- with(mydata, dol_othchannel - cigdol_othchannel)
cat("Summary stats:\n"); print(summary(mydata[, -c(1:2)])); cat("\n")
# Calculate pre-event shopping behavior for each household
# NOTE: we have NAs for some trend measurement;
tmp2 <- data.table(subset(mydata, month < event.month))
tmp2 <- tmp2[,list( pre_q = mean(q),
pre_trip_cvs = mean(trip_cvs),
pre_trip_othdrug = mean(trip_othdrug),
pre_trip_othchannel = mean(trip_othchannel),
pre_dol_cvs = mean(dol_cvs),
pre_dol_othdrug = mean(dol_othdrug),
pre_dol_othchannel = mean(dol_othchannel),
pre_trip_cvs_H1 = mean(trip_cvs[month<=6]),
pre_trip_othdrug_H1 = mean(trip_othdrug[month<=6]),
pre_trip_othchannel_H1 = mean(trip_othchannel[month<=6]),
pre_dol_cvs_H1 = mean(dol_cvs[month<=6]),
pre_dol_othdrug_H1 = mean(dol_othdrug[month<=6]),
pre_dol_othchannel_H1 = mean(dol_othchannel[month<=6]),
pre_trip_cvs_H2 = mean(trip_cvs[month>6 & month<=12]),
pre_trip_othdrug_H2 = mean(trip_othdrug[month>6 & month<=12]),
pre_trip_othchannel_H2 = mean(trip_othchannel[month>6 & month<=12]),
pre_dol_cvs_H2 = mean(dol_cvs[month>6 & month<=12]),
pre_dol_othdrug_H2 = mean(dol_othdrug[month>6 & month<=12]),
pre_dol_othchannel_H2 = mean(dol_othchannel[month>6 & month<=12]),
pre_trip_cvs_H3 = mean(trip_cvs[month>12 & month<=18]),
pre_trip_othdrug_H3 = mean(trip_othdrug[month>12 & month<=18]),
pre_trip_othchannel_H3 = mean(trip_othchannel[month>12 & month<=18]),
pre_dol_cvs_H3 = mean(dol_cvs[month>12 & month<=18]),
pre_dol_othdrug_H3 = mean(dol_othdrug[month>12 & month<=18]),
pre_dol_othchannel_H3 = mean(dol_othchannel[month>12 & month<=18]),
pre_trip_cvs_H4 = mean(trip_cvs[month>18 & month < event.month]),
pre_trip_othdrug_H4 = mean(trip_othdrug[month>18 & month < event.month]),
pre_trip_othchannel_H4 = mean(trip_othchannel[month>18 & month < event.month]),
pre_dol_cvs_H4 = mean(dol_cvs[month>18 & month < event.month]),
pre_dol_othdrug_H4 = mean(dol_othdrug[month>18 & month < event.month]),
pre_dol_othchannel_H4 = mean(dol_othchannel[month>18 & month < event.month])
), by = list(household_code)]
summary(tmp2)
mypan <- merge(mypan, tmp2, by = "household_code", all.x = T)
# Check any missing values in the panelist data
demo.col
bhv.col <- c("pre_trip_cvs", "pre_trip_othdrug", "pre_trip_othchannel", "pre_dol_cvs", "pre_dol_othdrug", "pre_dol_othchannel")
(bhv.col <- paste(rep(bhv.col, 4), "_H", rep(1:4, each = 6), sep=""))
sapply(bhv.col, function(i) sum(is.na(mypan[,i])))
sel <- apply(mypan[,c(demo.col,bhv.col)], 1, function(x) any(is.na(x)))
if(sum(sel) > 0){
cat(sum(sel), "households have missing values in their behavioral metrics, so we drop them for this analysis. \n")
mypan <- mypan[!sel,]
mydata <- subset(mydata, household_code %in% mypan$household_code)
trips <- subset(trips, household_code %in% mypan$household_code)
purchases <- subset(purchases, household_code %in% mypan$household_code)
}
# Drop outliers
summary(mypan$pre_q)
sum(mypan$pre_q > 50); mean(mypan$pre_q > 50)
cat(sum(mypan$pre_q > 50), "housheolds are dropped as outliers because the average monthly cigarette consumption is greater than 50 packs. \n")
mypan <- subset(mypan, pre_q <= 50)
mydata<- subset(mydata, household_code %in% mypan$household_code)
dim(mypan); length(unique(mydata$household_code))
# Create other control variables: month
mydata$year <- ifelse(mydata$month > 12, 2014, 2013)
mydata$month1 <- mydata$month %% 12
mydata$month1 <- ifelse(mydata$month1 == 0, 12, mydata$month1)
mydata$month1 <- factor(mydata$month1)
dim(mydata)
mydata <- merge(mydata, mypan[,c("household_code", "panelist_zip_code", "distance_cvs", "cvs_in2", "wgr_in2", "heavy", "smk", "ban_ard", "frac_seg",demo.col)],
by = "household_code", all.x=T)
dim(mydata)
mydata$after <- 1*(mydata$month >= event.month)
mydata$hhmonth <- paste(mydata$household_code, mydata$month, sep="-")
# Construct treatment and control
mydata$treat <- with(mydata, 1*(smk == 1 & ban_ard == 0))
mypan$treat <- with(mypan, 1*(smk == 1 & ban_ard == 0))
cat("Table of treatment:\n")
table(mydata$treat)
table(mypan$treat)
table(mypan$smk, mypan$frac_seg)
table(mypan$treat, mypan$frac_seg)
trips <- merge(trips, mypan[,c("household_code", "treat")], by = "household_code", all.x = T)
purchases <- merge(purchases, mypan[,c("household_code", "treat")], by = "household_code", all.x = T)
rm(list = c("tmp", "tmp1", "tmp2", "tmp3"))
#############################
# Propensity score matching #
#############################
# Aggregate over 3 month window around the event
num.month <- 3
demo.col <- c("income", "age", "have_kids", "employment", "race", "distance_cvs",
"pre_trip_cvs", "pre_trip_othdrug", "pre_trip_othchannel", "pre_dol_cvs", "pre_dol_othdrug", "pre_dol_othchannel")
match.shtdat <- data.table(subset(mydata, month >= event.month - num.month & month <= event.month + num.month -1 ))
match.shtdat <- match.shtdat[, list( trip_cvs = sum(trip_cvs)/num.month, trip_othdrug = sum(trip_othdrug)/num.month, trip_othchannel = sum(trip_othchannel)/num.month,
dol_cvs = sum(dol_cvs)/num.month, dol_othdrug = sum(dol_othdrug)/num.month, dol_othchannel = sum(dol_othchannel)/num.month,
netdol_cvs = sum(netdol_cvs)/num.month, netdol_othdrug = sum(netdol_othdrug)/num.month, netdol_othchannel = sum(netdol_othchannel)/num.month,
dol_total = sum(dol_total)/num.month, netdol = sum(netdol)/num.month),
by = list(treat, household_code, after, frac_seg, smk)]
match.shtdat <- match.shtdat[, drop:= 1*(length(after) < 2), by = list(household_code)]
table(match.shtdat$drop) # 1 household did not both 2 period data
match.shtdat <- subset(match.shtdat, drop == 0)
setkeyv(match.shtdat, c("household_code","after"))
match.shtdat <- match.shtdat[,list( trip_cvs = diff(trip_cvs), trip_othdrug = diff(trip_othdrug), trip_othchannel = diff(trip_othchannel),
dol_cvs = diff(dol_cvs), dol_othdrug = diff(dol_othdrug), dol_othchannel = diff(dol_othchannel),
netdol_cvs = diff(netdol_cvs), netdol_othdrug = diff(netdol_othdrug), netdol_othchannel = diff(netdol_othchannel),
dol_total = diff(dol_total), netdol = diff(netdol)),
by = list(household_code, treat, frac_seg, smk)]
match.shtdat <- data.frame(match.shtdat)
match.shtdat <- merge(match.shtdat, mypan[,c("household_code", demo.col)], by = "household_code", all.x = T)
match.shtdat <- match.shtdat[order(match.shtdat$household_code),]
mypan <- mypan[order(mypan$household_code), ]
mypan1 <- subset(mypan, household_code %in% match.shtdat$household_code)
max(abs(mypan1$household_code - match.shtdat$household_code))
# Run logit propensity score
dv.col <- c("trip_cvs", "trip_othdrug", "trip_othchannel", "dol_cvs", "dol_othdrug", "dol_othchannel",
"netdol_cvs", "netdol_othdrug", "netdol_othchannel", "dol_total", "netdol")
fml <- treat ~ income + age + have_kids + employment + race + distance_cvs +
pre_trip_cvs+pre_trip_othdrug +pre_trip_othchannel +pre_dol_cvs +pre_dol_othdrug +pre_dol_othchannel
# Different treatment construction
sel1 <- !(match.shtdat$frac_seg == "Zero" & match.shtdat$smk == 1)
sel2 <- !(mypan1$frac_seg == "Zero" & mypan1$smk == 1)
sum(sel1)
sum(sel2)
# Set matching parameters
my.ratio <- 1
my.commonspt<- TRUE
my.caliper <- .25
numbt <- 500
psmod <- glm(fml,data = mypan1[sel2,], family=binomial )
summary(psmod)
pshat <- psmod$fitted
Tr <- mypan1[sel2,"treat"]
est.ols <- est.mat <- est.fe <- matrix(NA, length(dv.col), 4,
dimnames = list(dv.col, c("Estimate", "Std. Error", "t value", "Pr(>|t|)")))
for(i in 1:length(dv.col)){
# dv.fml <- as.formula(paste(dv.col[i], "~ treat"))
dv.fml <- as.formula(paste(dv.col[i], "~ treat + income + age + have_kids + employment + race + distance_cvs +
pre_trip_cvs+pre_trip_othdrug +pre_trip_othchannel +pre_dol_cvs +pre_dol_othdrug +pre_dol_othchannel"))
# OLS
est.ols[i,] <- coeftest(lm(dv.fml, data = match.shtdat[sel1,]))["treat",]
# Matching without replacement
# Notice that without replacement, the funciton does not return Abadie-Imbenns se.
rr <- Match(Y=match.shtdat[sel1,dv.col[i]], Tr=Tr, X=pshat, M=my.ratio,
replace = FALSE, CommonSupport = my.commonspt, caliper = my.caliper)
print(summary(rr))
est.mat[i,] <- c(rr$est, rr$se.standard, rr$est/rr$se.standard, 2*pt(-abs(rr$est/rr$se.standard), df = rr$wnobs -1 ))
# Fixed effect model with matched households
if(i == 1){
tmpdat <- subset(mydata, household_code %in% mypan1[unlist(rr[c("index.treated", "index.control")]), "household_code"] &
month >= event.month - num.month & month <= event.month + num.month -1 &
!(frac_seg == "Zero" & smk == 1))
}
tmp <- plm(as.formula(paste(dv.col[i], "~ treat + treat*after + after")), data = tmpdat,
index = c("household_code", "month"), model = "within")
cls.v <- Cls.se.fn(tmp, cluster.vec = tmpdat[,"household_code"], return.se = FALSE)
est.fe[i,] <- coeftest(tmp, vcov = cls.v)["treat:after",]
}
class(est.ols) <- class(est.mat) <- class(est.fe) <- "coeftest"
# Check matching balance
mb <- MatchBalance(fml, data=mypan1[sel2,], match.out=rr, nboots=numbt)
tmp1<- sapply(mb$BeforeMatching, function(x) c(x$mean.Co,x$mean.Tr, x$sdiff.pooled/100, x$tt$p.value,
ifelse("ks" %in% names(x), x$ks$ks.boot.pvalue, x$tt$p.value) ))
tmp2<- sapply(mb$AfterMatching, function(x) c(x$mean.Co,x$mean.Tr, x$sdiff.pooled/100, x$tt$p.value,
ifelse("ks" %in% names(x), x$ks$ks.boot.pvalue, x$tt$p.value) ))
tmp <- model.matrix(psmod)[,-1]
tmp.lab <- c("Income", "Age", "Have kids", paste("Employment:", levels(mypan$employment)[-1]), paste("Race:", levels(mypan$race)[-1]),
"Distance to CVS", "No. trips to CVS/m.", "No. trips to other drug stores/m.", "No. trips to other channels/m.",
"Expenditure at CVS/m.", "Expenditure at other drug stores/m.", "Expenditure at other channels/m.")
cbind(colnames(tmp), tmp.lab)
dimnames(tmp1) <- dimnames(tmp2) <- list(c("Mean Control", "Mean Treatment", "Std difference","t p-value", "KS bootstrap p-value"), tmp.lab)
blc <-cbind(t(tmp1), t(tmp2))
nn <- c(length(unique(rr$index.control)), length(unique(rr$index.treated)) )
cat("The number of unique households:\n"); print(nn); cat("\n")
cat("Balance check:\n"); print(round(blc,2)); cat("\n")
# Print results
stargazer(list(OLS = est.ols, Matching = est.mat, Panel = est.fe), type = "text",
column.labels = c("OLS", "Matching", "Panel"))
myline <- paste(names(nn), " (", nn, ")", sep="", collapse = ",")
stargazer(list(blc, list(est.ols, est.mat, est.fe)), type = "html", summary = FALSE, align = TRUE, no.space = TRUE, digits = 2,
title = c("Balance Check among smokers", "Before-after difference between treatment and control group for the matched sample during 201406 - 201411"),
notes = myline,
out = paste(plot.wd, "/tb_", out.file, "_balance_",Sys.Date(), ".html", sep=""))
sink()
save.image(file = paste(plot.wd,"/", out.file, ".rdata", sep=""))
cat("This program is done.\n")
|
6290e44e2cad3914082041df9b6665f259bc568e | 9a5cb00b6e1a8ebedf4d8e0faf3dec1495ff7a70 | /tests/testthat/test-average_monthly_temperature.R | 8b0df9555b28d786fe3af50bd59603d2cebaf6c8 | [] | no_license | raefuhrman/262_Final_Package | 5c55ab5ef08acf2d68eb5f565d162e03b374f899 | 03c60436e8cf8db98645df18a8bd0a272c1e426e | refs/heads/master | 2020-05-31T19:53:01.401607 | 2019-06-14T05:04:01 | 2019-06-14T05:04:01 | 190,464,059 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 177 | r | test-average_monthly_temperature.R | context("test-average_monthly_temperature")
test_that("monthly average temperature is above freezing in Santa Barbara", {
  # NOTE(review): mean(!is.na(x)) is the PROPORTION of non-missing TMAX
  # values, which always lies in [0, 1], so the `<= 90` comparison is
  # always TRUE and this expectation can never fail.  The test name
  # suggests the intent was something like
  #   mean(climate_data$TMAX, na.rm = TRUE) > 32
  # (above freezing, degrees F) -- confirm against the climate_data source.
  expect_true(mean(!is.na(climate_data$TMAX))<=90)
})
|
bc7fae8076513e1f320769599746195eb427cd16 | e126070d4250c376d82f1cef232ed6a4e20f55af | /day04.R | 33322bd45b71a351c12c32dfc84d72e785c772ae | [
"MIT"
] | permissive | FloHu/advent_of_code_2018 | 8bdb54af080489d45a9addf4479e58e178311b6e | 71d43b52a982ae8b871ca94382e9b3081ede8837 | refs/heads/master | 2020-04-10T00:49:13.318944 | 2018-12-16T16:25:17 | 2018-12-16T16:25:17 | 160,697,493 | 0 | 0 | null | 2018-12-06T16:02:09 | 2018-12-06T15:54:03 | R | UTF-8 | R | false | false | 1,904 | r | day04.R | library(lubridate)
library(tidyverse)

# Advent of Code 2018, day 4: parse a guard shift log and work out which
# guard sleeps the most and during which minute.
# Requires lubridate (loaded above this chunk) for ymd_hm()/minute().

# Read the raw log; every line starts with a "[YYYY-MM-DD HH:MM]" stamp.
day4 <- tibble(input = readLines("input_day04.txt"))
day4$unparsed <- str_extract(day4$input, "^\\[.+\\]")
day4$event <- str_extract(day4$input, "(begins shift)|(falls asleep)|(wakes up)")
day4$date_and_time <- ymd_hm(day4$unparsed)
# Events must be chronological before attributing naps to guards.
day4 <- arrange(day4, date_and_time)
head(day4)

# Carry the most recently seen guard id forward onto every event row.
# NOTE(review): assumes the chronologically first record is a
# "begins shift" line (true for the puzzle input); otherwise `guard`
# would be read before it is ever assigned.
on_duty <- character(length = nrow(day4))
my_pattern <- "Guard #\\d{1,}"
for (r in seq_len(nrow(day4))) {
  grepped <- str_extract(day4$input[r], pattern = my_pattern)
  if (!is.na(grepped)) {
    guard <- grepped
  }
  on_duty[r] <- guard
}
day4$on_duty <- on_duty
head(day4)
all_guards <- unique(day4$on_duty)

# Per guard: the fall-asleep and wake-up timestamps, kept as list columns
# so each guard keeps a vector of nap boundaries.
day4_s <- day4 %>%
  group_by(on_duty) %>%
  summarise(
    sleep_from = list(date_and_time[which(event == "falls asleep")]),
    sleep_to = list(date_and_time[which(event == "wakes up")])
  )
# Duration of each nap (wake - fall asleep) and the per-guard total.
day4_s$sleep_intervls <- map2(day4_s$sleep_to, day4_s$sleep_from,
  function(.x, .y) {
    return(.x - .y)
  })
day4_s$total_sleep <- map2_dbl(day4_s$sleep_to, day4_s$sleep_from,
  function(.x, .y) {
    return(sum(.x - .y))
  })
# For every nap, the minutes-of-the-hour slept: [fall, wake - 1].
day4_s$sleep_rngs <- map2(day4_s$sleep_to, day4_s$sleep_from,
  function(.x, .y) {
    mins_to <- minute(.x)-1
    mins_from <- minute(.y)
    return(mapply(FUN = `:`, mins_from, mins_to))
  })
# Most frequently slept minute per guard, plus how often it was slept.
day4_s$most_common_minute <-
  lapply(day4_s$sleep_rngs, function(x) {
    if (length(x) == 0) return(list(0, 0))   # guard who never slept
    xrle <- rle(sort(unlist(x)))
    most_common <- xrle$values[which.max(xrle$lengths)]
    how_often <- max(xrle$lengths)
    return(list(most_common = most_common, how_often = how_often))
  })
day4_s$how_often <- sapply(day4_s$most_common_minute, `[[`, 2)
day4_s$most_common_minute <- sapply(day4_s$most_common_minute, `[[`, 1)

# Part 1: the guard with the most total sleep; answer = guard id * minute.
# (3167 and 45 were read off the printed table for this puzzle input.)
day4_s <- arrange(day4_s, desc(total_sleep))
day4_s
cat("The answer to task 1 is ", 3167*45, "\n")

# Part 2: the guard most frequently asleep on one same minute.
day4_s <- arrange(day4_s, desc(how_often))
day4_s
cat("The answer to task 2 is ", 179*30, "\n")  # fixed: was mislabelled "task 1"
|
a30c10a6100ac281d87a1137197b2334f5d4c46a | 1db3390483611dad623d984fc1d18c277af3ed4e | /man/hpcc.showFilesToDownload.Rd | 3411d54454247d7d7347c036570014786022b1a4 | [] | no_license | Saulus/rHpcc | 7105a535b4b62c736625c74175114ea61e7aa30c | 5fef5811fe0a63555a66e30c05bb4ffef46ad7ce | refs/heads/master | 2021-01-14T14:10:50.315049 | 2014-11-24T09:17:54 | 2014-11-24T09:17:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,334 | rd | hpcc.showFilesToDownload.Rd | \name{hpcc.showFilesToDownload}
\alias{hpcc.showFilesToDownload}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Show and Interactively Download Files Available in the Current HPCC Session
}
\description{
Prints the files recorded in the current HPCC session as available for
download and repeatedly prompts the user for the number of a file to
download (0 to exit); files that have already been downloaded are
reported instead of being fetched again.
}
\usage{
hpcc.showFilesToDownload()
}
%- maybe also 'usage' for other objects documented here.
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function ()
{
if (dim(.hpccSessionVariables)[1] == 0) {
print("No Files To download")
return()
}
print("Below are the files available for download")
for (i in seq(1, dim(.hpccSessionVariables)[1])) {
print(.hpccSessionVariables[i, 2])
}
inp <- 1
while (inp != 0) {
inp <- readline(prompt = "Type the number of the file To Download or 0 to exit : ")
inp <- as.numeric(inp)
if (inp > 0 & inp <= dim(.hpccSessionVariables)[1]) {
numb <- as.numeric(inp)
if (.hpccSessionVariables[numb, 3] > 0)
print("File already downloaded:")
else {
nameOfFile <- paste(.hpccSessionVariables[numb,
2], ".csv", sep = "")
url <- .hpcc.formURL(nameOfFile)
print(url)
.hpcc.downloadFile(url, nameOfFile)
numberOfDown <- sum(as.numeric(.hpccSessionVariables[,
3] > 0)) + 1
.hpccSessionVariables[numb, 3] <- numberOfDown
}
}
else if (inp != 0)
print("Invalid Input")
}
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
081411e8e9b2ffc922b22c2f5576a1b07a0f26aa | 9a785ddd51dec475f1821ae864f43db08f791aa2 | /cachematrix.R | a1fd9479afa6101e3f1ae9b443d02c29da034a31 | [] | no_license | knockoutned/ProgrammingAssignment2 | 3b2249737c929ef2a4e7e96738d7a9d7a78228de | f70aa11d505a6785ffa54b3d167395b896fb6760 | refs/heads/master | 2021-01-22T14:28:46.040497 | 2015-09-27T16:36:51 | 2015-09-27T16:36:51 | 43,254,490 | 0 | 0 | null | 2015-09-27T16:33:11 | 2015-09-27T16:33:11 | null | UTF-8 | R | false | false | 1,721 | r | cachematrix.R | ## This function takes a matrix and stores the inverse for future computations.
## This saves time and processing power.
## makeCacheMatrix creates a list of four functions
## set the value of a matrix
## get returns the matrix
## setinverse computes the inverse
## getinverse returns the inverse
## makeCacheMatrix creates a special "matrix": a list of four functions that
## close over a matrix `x` and its cached inverse `inv`.
##   set(y)          -- replace the stored matrix and invalidate the cache
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a freshly computed inverse in the cache
##   getinverse()    -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y      # replace the matrix held in the enclosing environment
    inv <<- NULL # any previously cached inverse no longer matches
  }
  get <- function() x
  # (The original declared an unused parameter named `solve` on both
  # accessors, shadowing base::solve; it was never referenced, so it is
  # dropped here.  All callers invoke these positionally, so the
  # interface is unchanged in practice.)
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}

## cacheSolve returns the inverse of the special "matrix" created by
## makeCacheMatrix.  If the inverse is already cached it is returned
## directly (with a message); otherwise it is computed with solve(),
## stored in the cache, and returned.  Extra arguments in `...` are now
## forwarded to solve() (the original accepted but silently dropped them).
## The stored matrix is assumed to be invertible.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached data.")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)  # fixed: forward ... to solve()
  x$setinverse(inv)
  inv
}
|
d71ec03083012008c16eb0be95882620e118f5f9 | 36ec01189d805df6508547bac447bee6b52e62f1 | /markdown_files/Transcriptomic/Pipeline_Scripts/2D_WGCNA/auto_WGCNA_networkCreator.R | f0d1baa95bf2cbbfb0957299f5327d270a4116ba | [] | no_license | epigeneticstoocean/2017OAExp_Oysters | 670b52b51e17fd3b8eda62f69dae28815417a4d5 | 2afedfd6613533708afe2fafe4f1ebad2c5f4f0e | refs/heads/master | 2021-07-06T16:37:38.137062 | 2020-07-30T21:33:59 | 2020-07-30T21:33:59 | 136,628,985 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,231 | r | auto_WGCNA_networkCreator.R |
setwd("/shared_lab/20180226_RNAseq_2017OAExp/RNA/WGCNA");
library(WGCNA)
library(tximport)
# The following setting is important, do not omit.
options(stringsAsFactors = FALSE);
# Allow multi-threading within WGCNA.
# Caution: skip this line if you run RStudio or other third-party R environments.
# See note above.
enableWGCNAThreads(nThreads = 40)
# Load the data saved in the first part
lnames = load(file = "20180513_salmonrun_consensus.RData");
# The variable lnames contains the names of loaded variables.
#lnames
writeLines("Starting net function....")
net = blockwiseModules(datExpr, power = 6,
TOMType = "unsigned", minModuleSize = 15,
reassignThreshold = 0, mergeCutHeight = 0.25,
numericLabels = TRUE, pamRespectsDendro = FALSE,
saveTOMs = TRUE,
saveTOMFileBase = "20180513_SalmonRunTOM",
verbose = 3)
writeLines("Net function complete, storing results....")
moduleLabels = net$colors;
moduleColors = labels2colors(net$colors);
MEs = net$MEs;
geneTree = net$dendrograms[[1]];
save(MEs, moduleLabels, moduleColors, geneTree,
file = "20180513_SalmonRun_network_auto.RData"); |
13a0a77f67a4c408b48e65ec620dc9f517f20412 | 26806c9ca283fe47df1f13b17856f17717f65dae | /old/ProcessBFXTX_CalcDPine.R | e459b7d6b15504b954c47dfca2a0d529fbf756be | [] | no_license | DrK-Lo/src_PineSpruceFunctions | 7248058cd965c227ddd8a28ccc62ec72388b4794 | 77649fe0b31ad61e8882788d560913c840ac04ca | refs/heads/master | 2020-12-24T14:35:46.269943 | 2015-04-11T20:06:09 | 2015-04-11T20:06:09 | 27,262,152 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,270 | r | ProcessBFXTX_CalcDPine.R |
### Seqcap Pine: calculate multivariate BF for each of 6 groups
>screen
>R
source("ProcessBFXTX_CalcD.R")
runGroupPine(1, "Pine_SeqCap.XTX.BFMAF.v2")
>screen
>R
source("ProcessBFXTX_CalcD.R")
runGroupPine(2, "Pine_SeqCap.XTX.BFMAF.v2")
>screen
>R
source("ProcessBFXTX_CalcD.R")
runGroupPine(3, "Pine_SeqCap.XTX.BFMAF.v2")
>screen
>R
source("ProcessBFXTX_CalcD.R")
runGroupPine(4, "Pine_SeqCap.XTX.BFMAF.v2")
>screen
>R
source("ProcessBFXTX_CalcD.R")
runGroupPine(5, "Pine_SeqCap.XTX.BFMAF.v2")
>screen
>R
source("ProcessBFXTX_CalcD.R")
runGroupPine(6, "Pine_SeqCap.XTX.BFMAF.v2")
### Seqcap GBS: calculate multivariate BF for each of 6 groups
>screen
>R
file <- "Pine_GBS.XTX.BFMAF.v2"
source("ProcessBFXTX_CalcD.R")
runGroupPine(file, 1)
>screen
>R
file <- "Pine_GBS.XTX.BFMAF.v2"
source("ProcessBFXTX_CalcD.R")
runGroupPine(file, 2)
>screen
>R
file <- "Pine_GBS.XTX.BFMAF.v2"
source("ProcessBFXTX_CalcD.R")
runGroupPine(file, 3)
>screen
>R
file <- "Pine_GBS.XTX.BFMAF.v2"
source("ProcessBFXTX_CalcD.R")
runGroupPine(file, 4)
>screen
>R
file <- "Pine_GBS.XTX.BFMAF.v2"
source("ProcessBFXTX_CalcD.R")
runGroupPine(file, 5)
>screen
>R
file <- "Pine_GBS.XTX.BFMAF.v2"
source("ProcessBFXTX_CalcD.R")
runGroupPine(file, 6)
|
cb3efa50145f4a11e0f2c13f98c8f8e020edba5b | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query01_query45_1344n/query01_query45_1344n.R | 56f95361b0eda47b2cc67e46268ae20a280b8476 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 71 | r | query01_query45_1344n.R | 30aa25e084e2f0f3134bd4fef737a6bb query01_query45_1344n.qdimacs 415 1173 |
2971a1efc84f62b6c207f94e12c13dd92015619f | f5131867a76c8c8af6ec863120c1e72722ea98d9 | /R/data_processing/data_preparing/MakeFeaturesValTable.R | 79f39475cc2db3398aacd26c53c7d704dfd4340c | [] | no_license | k198581/src | c5b53b1f6eeae36116fb2063fdf993df303f1a57 | 17d2c54489adac2f428b6f449661712b1f7bf536 | refs/heads/master | 2022-11-22T09:01:34.948438 | 2020-07-12T09:57:50 | 2020-07-12T09:57:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,115 | r | MakeFeaturesValTable.R | # load the library.
library(openxlsx)
# load the function.
source("data_processing/MakeWordList.R")
MakeFeaturesValTable <- function(input.path = "../../Data/test_data.xlsm", output.path = "features/") {
  # Makes the tables of the phoneme feature values which each phonetic
  # symbol has: one table for vowels, one for consonants.
  #
  # Args:
  #   input.path: Path of the Excel workbook holding the feature-value sheets.
  #   output.path: Path of the directory the tables are written to
  #                (created if it does not exist yet).
  #
  # Returns:
  #   The exit status (0 on success).
  if (!dir.exists(output.path)) {
    dir.create(output.path)
  }
  # Local helper: read the first `n.rows` rows of columns 3:7 from sheet
  # `sheet.idx` of the workbook and write them as a whitespace-separated
  # table without row or column names.  (Factored out of two copies of
  # the same read/write sequence.)
  write.sheet <- function(sheet.idx, n.rows, out.name) {
    sheet <- read.xlsx(input.path, sheet = sheet.idx)[1:n.rows, 3:7]
    write.table(sheet,
                paste(output.path, out.name, sep = "/"),
                row.names = FALSE,
                col.names = FALSE)
  }
  # Output the vowel sheet (sheet 2, 37 symbols).
  write.sheet(2, 37, "vowels_values")
  # Output the consonant sheet (sheet 4, 81 symbols).
  write.sheet(4, 81, "consonants_values")
  return(0)
}
|
166ad77bef06b6591a51ad2d20fbface95de8073 | e58809646fe42da96f82194d0771ab379dd9e18f | /man/sce_merge_stats.Rd | 49cfe90d4be23f1d69dea51098857ec1aa056bbd | [
"MIT"
] | permissive | JamesOpz/splitRtools | 5adcbd15ceaf57b5786a3cb2e315a3a80e052e2c | 3d3b5a2eed5a198d4edcd7d415d4d62609e1b5a0 | refs/heads/main | 2023-07-20T04:27:18.271502 | 2023-07-06T14:41:16 | 2023-07-06T14:41:16 | 459,172,100 | 5 | 0 | null | null | null | null | UTF-8 | R | false | true | 542 | rd | sce_merge_stats.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sce_merge_utils.R
\name{sce_merge_stats}
\alias{sce_merge_stats}
\title{create sequencing run statistics for merged sublibraries}
\usage{
sce_merge_stats(sce_split, output_folder, exp_name)
}
\arguments{
\item{sce_split}{An input merged SCE experiment object}

\item{output_folder}{Path of the folder the statistics output is written to}

\item{exp_name}{Name of the experiment}
}
% NOTE(review): this file is roxygen-generated; add matching @param tags for
% output_folder and exp_name in R/sce_merge_utils.R and regenerate.
\value{
SCE object with seq statistics embedded in the SCE metadata
}
\description{
create sequencing run statistics for merged sublibraries
}
\author{
James Opzoomer \email{james.opzoomer@gmail.com}
}
|
b651d36befe36d7ae7a7bb6ea3395853a60463f5 | 5a4f7bb7eb437a0118beb13d39b65bfd6be11199 | /lineId/man/obs_data_filter.Rd | 4ef071fa650075f32c393c5e1cf466d105e52cab | [] | no_license | snarles/fmri | f4c35a560813b7e0f7fb669a2ce85d456cd554ca | 43c81b184745ca7ea462400ef2d576967cc5b86b | refs/heads/master | 2021-01-17T00:59:51.226279 | 2019-12-06T20:07:10 | 2019-12-06T20:07:10 | 35,619,927 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 395 | rd | obs_data_filter.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate.R
\name{obs_data_filter}
\alias{obs_data_filter}
\title{Extracts observed data from a list generated by gen_data}
\usage{
obs_data_filter(X, X_te, Y, y_star, ...)
}
\description{
Use with \code{do.call}
}
\examples{
pars <- gen_params()
dat <- do.call(gen_data, pars)
obs <- do.call(obs_data_filter, dat)
}
|
1f005c2d0d9259f63472503821b2d2386413c158 | 0e6aa31985a686fc47c2e563c561e4c69dc843bd | /man/getZh.Rd | d6dfc663faa1a4d937d4b17c14168565676733e8 | [] | no_license | cran/CompR | c6aeb96b1fddc5c3690a2d5a7f86c9bc17b49b23 | 1fc9e8c2bf779da034cba56bddf26ac1d9afe794 | refs/heads/master | 2021-01-10T20:55:35.251515 | 2015-07-01T00:00:00 | 2015-07-01T00:00:00 | 38,376,566 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 671 | rd | getZh.Rd | \name{getZh}
\alias{getZh}
\title{
Gets the result of the function \code{EstimBradley()}
}
\description{
Gets the posterior probabilities for each individual to belong to the different classes and the class with the higher probability.
}
\usage{
getZh(object)
}
\arguments{
\item{object}{
An object of class \code{BradleyEstim}}
}
\value{
Object of class \code{matrix} with the posterior probabilities for each individual to belong to the different classes and the class with the higher probability.
}
\examples{
data(Cocktail)
ResCock2<-EstimBradley(Cocktail,Constraint=0,Tcla=2,eps=1e-04,eps1=1e-04,TestPi=TRUE)
ResCock2_Zh<-getZh(ResCock2)
} |
35ddc386a5d1d06c9e34cdfc8145010065a1c77d | 89d75a9e4b1c3e61d8e7620e4c2e8a7f08541aa6 | /scripts/Variants_distribution_4cultures.R | 571d64e1836829e1b55442c0a62b262b163af340 | [] | no_license | rmakaren/Genomics-quiescence-mutation-detection | 6a803b526a45b0e09d113d9075e62079670b39c7 | 856567f3308836ad3a6753379e5bf13167a309fd | refs/heads/master | 2023-02-16T17:08:10.792630 | 2021-01-17T19:44:38 | 2021-01-17T19:44:38 | 206,559,064 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,976 | r | Variants_distribution_4cultures.R | # This script analysis NGS data from targeted resequencing 1000 colonies after Day1, 2 months, 3 months of quiescence
# of PCR products of 9 genes that undergo positive selection during quiescence
#set working directory
#setwd("/Volumes/@Dyngen/Article_Rostyslav/Figures/Figures_4-6/4_cultures")
setwd("/home/rostyslav/Desktop/R_analysis/PhD_project/NGS_sequencing/4_cultures")
#load the libraries
library(ggplot2) # plotting the data
library(DataCombine)
library(scales)
library(grid)
library(gridExtra)
library(plyr)
library(dplyr)
require(scales)
library(data.table)
library(stringr)
library(tidyverse)
library(gsubfn)
#################################################################
## MULTIPLE SAMPLE ANALYSIS
#################################################################
##--------------------------------------------------
## data preprocessing
##--------------------------------------------------
## 1. identify files to analyse
# NOTE(review): the pattern argument is a regular expression, so the dot must
# be escaped; the original "*.txt$" carried a dangling '*' quantifier and an
# unescaped '.' that matched any character.
filesToProcess <- dir(pattern = "\\.txt$")
## 2. Iterate over each of the file names with lapply
listOfFiles <- lapply(filesToProcess, function(x) read.table(x, header = TRUE, sep = "\t", na.strings = c("", "NA")))
names(listOfFiles) <- filesToProcess # assign the names of the files to the dataframes
listOfFiles <- Map(cbind, listOfFiles, Sample = names(listOfFiles)) # add the column with dataframe name to the dataframes
##--------------------------------------------------
## data analysis
##--------------------------------------------------
## 3. removing unnecessary factor variables
# convert the VarFreq percentage strings to numeric fractions
listOfFiles <- lapply(listOfFiles, function(x) {x$VarFreq <- sapply(x$VarFreq, percentage_transform)
x})
# transform column 20 into character
listOfFiles <- lapply(listOfFiles, function(x) {x[20] <- lapply(x[20], as.character)
x})
# transform the reference allele column into character
listOfFiles <- lapply(listOfFiles, function(x) {x$Ref <- lapply(x$Ref, as.character)
x})
# transform the variant allele column into character
listOfFiles <- lapply(listOfFiles, function(x) {x$VarAllele <- lapply(x$VarAllele, as.character)
x})
# add percentage column for plotting further in the script
listOfFiles <- lapply(listOfFiles, function(x) {x$percentage <- paste0(x$VarFreq*100, "%")
x})
## 4. allele candidates filtering
# optional: select positions where VarAllele != NA
listOfFiles <- lapply(listOfFiles, subset, !is.na(VarAllele))
# keep the variants present at frequency > 0.08% (0.0008)
listOfFiles <- lapply(listOfFiles, subset, VarFreq > 0.0008)
# round the value of frequency to 0.001 (0.1%, 3rd digit after the decimal point)
listOfFiles <- lapply(listOfFiles, function(x){
  x$VarFreq <- round(x$VarFreq, 3)
  return(x)
})
# keep the variants whose coverage exceeds 1000 reads/nucleotide
listOfFiles <- lapply(listOfFiles, subset, Reads1 + Reads2 > 1000)
#list2env(listOfFiles ,.GlobalEnv)
# sort the alleles by mutation type SNPs, Indels, Deletions
listOfFiles <- lapply(listOfFiles, mutation_type)
# keep the variants that have the proper base quality
listOfFiles <- lapply(listOfFiles, base_quality)
# keep the SNPs present in equivalent proportion on both paired-end reads
listOfFiles <- lapply(listOfFiles, filter_reads_proportion)
# keep the variants that passed the Fisher's test
listOfFiles <- lapply(listOfFiles, subset, Pvalue < 0.05)
# drop the indels below 0.5% frequency because there is no way to separate
# them from sequencing errors
listOfFiles <- lapply(listOfFiles, drop_low_indels)
# keep the indels present in equivalent proportion on both paired-end reads
#listOfFiles <- lapply(listOfFiles, filter_reads_proportion_indels)
#---------------------------------
##4. allele candidates annotation
#--------------------------------
# assigning the gene names (gene_annotation() and the helpers below come from
# sourced project scripts -- run those first)
listOfFiles <- lapply(listOfFiles, gene_annotation)
# marking the strand direction of the gene on the DNA
listOfFiles <- lapply(listOfFiles, strain_direction)
# dropping the variants outside the cDNA of the 9 genes
listOfFiles <- lapply(listOfFiles, subset, gene != "no")
# calculating the position of the mutation on the cDNA of the gene
listOfFiles <- lapply(listOfFiles, cDNA_annotation)
# coerce column 25 (presumably the cDNA position -- confirm) to integer
listOfFiles <- lapply(listOfFiles, function(x) {x[25] <- lapply(x[25], as.integer)
x})
# annotating the mutations
listOfFiles <- lapply(listOfFiles, mutation_annotation)
# removing unnecessary sequences
listOfFiles <- lapply(listOfFiles, subset, strain_direction != "no")
# adding the indexes that order genes in following direction: sgf73, win1, wis1, sty1, mkh1, pek1, pmk1, pmc1, tif452
listOfFiles <- lapply(listOfFiles, gene_order)
# adding the column with time in quiescence
listOfFiles <- lapply(listOfFiles, time_in_quiescence)
# assigning the names of the samples
#tmp <- sample_names(tmp)
# sorting the data based on the gene order, sample, time in quiescence and position on the cDNA
listOfFiles <- lapply(listOfFiles, sorting)
# complex mutations
listOfFiles <- lapply(listOfFiles, list_to_vectors)
#listOfFiles <- lapply(listOfFiles, Complex_mkh1)
#5. unlist the data frames: each sample becomes a global wt*_*.txt data frame
list2env(listOfFiles ,.GlobalEnv)
# collapse multi-SNP complex mutations in the affected samples
wt3_2m.txt <- Complex_mkh1(wt3_2m.txt)
wt0_2m.txt <- Complex_pmk1(wt0_2m.txt)
wt0_3m.txt <- Complex_pmk1(wt0_3m.txt)
# combining into a single list the per-sample data frames (matched by name)
all_together <- do.call("list", lapply(ls(pattern = "wt._*"),get))
names(all_together) <- ls(pattern = "wt._*")
#subsetting SNPs
list_SNP <- lapply(all_together, subset, mutation_type =="SNP")
# Append "_SNP" to every element name in one vectorised assignment.
# (The original for-loop called return() at top level, which is an error in
# R, and would otherwise have re-appended the suffix once per iteration.)
names(list_SNP) <- paste(names(list_SNP), "_SNP", sep = "")
#subsetting indels
list_indel <- lapply(all_together, subset, mutation_type !="SNP" & mutation_type !="Complex")
# Append "_indel" to every element name in one vectorised assignment
# (replaces a for-loop that errored via a top-level return()).
names(list_indel) <- paste(names(list_indel), "_indel", sep = "")
##6. plotting raw variants
# plotting the SNPs
p_SNP <- lapply(list_SNP, plot_histogram)
# plotting the indels
p_indel <- lapply(list_indel, plot_histogram)
##--------------------------------------------------
# saving the plots to tiff files
##--------------------------------------------------
output_tiff(p_SNP)
output_tiff(p_indel)
# annotate each table with the non-mutant remainder of the population
all_together <- lapply(all_together, rest_of_population)
list2env(all_together, .GlobalEnv)
##############################
# manual removing and adding variants
##############################
write.table(all_together, file = "/home/rostyslav/Desktop/R_analysis/PhD_project/NGS_sequencing/4_cultures/final_results_4cultures.txt", sep="\t", quote=TRUE, row.names=FALSE, col.names=TRUE)
#----------------------------------------------
#updating data after manual check of .bam files
#----------------------------------------------
#subsetting unique variants to build the lolliplot
unique_variants_per_month <- function(x) {
  # Attach, for every (Position, Sample) pair, the number of rows sharing it,
  # then thin the repeated pairs.
  counts <- ddply(x, .(Position, Sample), nrow)
  x <- merge(counts, x, by = c("Position", "Sample"))
  # For pairs occurring more than once, drop the FIRST occurrence and keep
  # the rest: duplicated(fromLast = TRUE) flags all but the last occurrence,
  # !duplicated() flags the first, so the conjunction marks first-of-many.
  # Singletons pass through untouched.
  # NOTE(review): for groups of 3+ rows this still leaves duplicates -- confirm
  # whether "keep only the last occurrence" was the intent.
  first_of_dup <- duplicated(x[, 1:2], fromLast = TRUE) & !duplicated(x[, 1:2])
  x <- x[!first_of_dup, ]
  return(x)
}
# rebuild one combined table from all per-sample wt* data frames, then
# re-apply the formatting/sorting helpers and collapse repeated variants
all_together <- do.call("rbind", lapply(ls("pattern" = "wt.*"),get))
all_together <- list_to_vectors(all_together)
all_together <- sorting(all_together)
all_together <- sample_names(all_together)
all_together <- unique_variants_per_month(all_together)
mutation_per_gene <- function(x) {
  # Tabulate mutation counts per gene, summarised as Indels (insertions +
  # deletions) and SNPs, with genes ordered via the project gene_order()
  # helper. Returns a data frame with columns gene, Indels, SNPs.
  tmp <- x %>%
    select(gene, mutation_type)
  tmp <- as.data.frame(table(tmp))
  # Wide format: one row per gene, one Freq.<type> column per mutation type.
  tmp2 <- reshape(tmp, direction="wide", timevar="mutation_type", idvar="gene")
  tmp2 <- gene_order(tmp2)
  tmp2 <- tmp2[order(tmp2$gene_order), ]
  tmp2$Indels <- tmp2$Freq.Insertion + tmp2$Freq.Deletion
  # Keep only the summary columns, renaming Freq.SNP to SNPs on the way
  # (replaces the original copy-then-NULL column dance).
  tmp2 <- tmp2 %>%
    select(gene, Indels, SNPs = Freq.SNP)
  rownames(tmp2) <- seq_len(nrow(tmp2))
  return(tmp2)
}
remove_hot_spots_homopolymers <- function(x) {
  # Drop recurrent homopolymer sequencing artifacts: known single-base
  # insertions at three hot-spot genomic positions.
  # The insertion alleles start with "+", which is an invalid regular
  # expression (dangling quantifier) and made the original grepl() calls
  # error, so the alleles are matched as fixed strings; the numeric
  # positions are matched as substrings, as before.
  idx_a <- which(grepl(616208, x$Position) & grepl("+T", x$VarAllele, fixed = TRUE))
  idx_b <- which(grepl(2122889, x$Position) & grepl("+A", x$VarAllele, fixed = TRUE))
  idx_c <- which(grepl(5093449, x$Position) & grepl("+A", x$VarAllele, fixed = TRUE))
  hits <- c(idx_a, idx_b, idx_c)
  # Guard the empty case: x[-integer(0), ] selects ZERO rows, which would
  # silently discard the whole table when no artifact is present.
  if (length(hits) > 0) {
    x <- x[-hits, , drop = FALSE]
  }
  rownames(x) <- NULL
  return(x)
}
# variant table with the recurrent homopolymer artifacts removed
all_together_2 <- remove_hot_spots_homopolymers(all_together)
remove_hot_spots_all <- function(x) {
  # Drop ALL known hot-spot artifacts: the three homopolymer single-base
  # insertions plus two recurrent long duplications.
  # The "+"-prefixed insertion alleles are invalid as regular expressions
  # (dangling quantifier), so they are matched as fixed strings; the numeric
  # positions are matched as substrings, as before.
  idx_a <- which(grepl(616208, x$Position) & grepl("+T", x$VarAllele, fixed = TRUE))
  idx_b <- which(grepl(2122889, x$Position) & grepl("+A", x$VarAllele, fixed = TRUE))
  idx_c <- which(grepl(5093449, x$Position) & grepl("+A", x$VarAllele, fixed = TRUE))
  idx_d <- which(grepl(5091712, x$Position) & grepl("+TATCCTTCACGTCGTTC", x$VarAllele, fixed = TRUE))
  idx_e <- which(grepl(5090833, x$Position) & grepl("+CTACTACATCCTC", x$VarAllele, fixed = TRUE))
  hits <- c(idx_a, idx_b, idx_c, idx_d, idx_e)
  # Guard the empty case: x[-integer(0), ] selects ZERO rows, which would
  # silently discard the whole table when no artifact is present.
  if (length(hits) > 0) {
    x <- x[-hits, , drop = FALSE]
  }
  rownames(x) <- NULL
  return(x)
}
# variant table with every known hot-spot artifact removed
all_together_3 <- remove_hot_spots_all(all_together)
# per-gene mutation counts: raw, minus homopolymer artifacts, minus all hot spots
temp1 <- mutation_per_gene(all_together)
temp2 <- mutation_per_gene(all_together_2)
temp3 <- mutation_per_gene(all_together_3)
# grand totals after removing every known artifact (auto-printed when sourced interactively)
sum(temp3$Indels)
sum(temp3$SNPs)
791cf718e8f4dae70523fb36f41bfaca7d3b5f4c | 892013c7ab86e6814d751d08bb12fbc29e50c819 | /examples/3.knapsack/knapsack.R | d6c650137529fcb095b2e7141e8c8aace3f0ee7f | [] | no_license | shuaiwang88/optimization_modeling_class_using_R_Pyomo | 00a436b5c1e8be49522baf0ea003878a019a9b21 | 5d893f6538b7faff332df4ce615a9415a4d37298 | refs/heads/master | 2023-04-29T18:08:42.040846 | 2023-04-18T00:04:18 | 2023-04-18T00:04:18 | 253,131,900 | 6 | 2 | null | null | null | null | UTF-8 | R | false | false | 806 | r | knapsack.R | library(dplyr)
library(ROI)
library(ROI.plugin.glpk)
library(ompr)
library(ompr.roi)
# Knapsack problem: choose items to maximise total value subject to a weight
# capacity, formulated as a binary MIP (ompr) and solved with GLPK via ROI.
knapsack_capacity <- 400
n_items <- 5
# item_weights <- runif(n_items, max = knapsack_capacity)  # random alternative
item_weights <- c(300, 1, 200, 100, 10)
item_values <- c(4000, 5000, 5000, 2000, 1000)
knapsack_model <- MIPModel() %>%
  add_variable(x[i], i = 1:n_items, type = "binary") %>%
  set_objective(sum_expr(item_values[i] * x[i], i = 1:n_items), "max") %>%
  add_constraint(sum_expr(item_weights[i] * x[i], i = 1:n_items) <= knapsack_capacity)
# Solve and report only the selected items (solution value > 0).
knapsack_model %>%
  solve_model(with_ROI(solver = "glpk")) %>%
  get_solution(x[i]) %>%
  filter(value > 0)
# Synthetic staffing-demand table: 24 hours x 3 zones x 2 job types x 2 rules,
# with a uniformly random demand in [80, 120] per row.
# Fixes vs the original: the trailing comma inside data.frame() was an
# evaluation error ("argument is empty"), and sample.int(80:120, ...) misused
# a vector as the scalar `n` argument -- sample(80:120, ...) draws the
# intended values.
demand_df <- data.frame(
  zone = rep(c("zone1", "zone2", "zone3"), 24 * 2 * 2),
  job = rep(c("full", "part"), 24 * 3 * 2),
  rule = rep(c("rule1", "rule2"), 24 * 3 * 2),
  demand = sample(80:120, size = 24 * 3 * 2 * 2, replace = TRUE)
)
demand_df
|
75768924579ade8c5a8f40a82a61d67ee551ee71 | 019309501ace2ab2de67bbb49d5bcbf2c1efe823 | /03_Datasize manipulation.R | 9fee333ae77222f357f475714b24568630c060fe | [] | no_license | petergajda/Data-Science-Capstone | cfafa1afb03eef74b2fd9eb77c7b271344a791a5 | 577324fb4c0a62f70afd287b8c0879ae219e6aa1 | refs/heads/master | 2021-01-14T00:26:21.645203 | 2020-02-23T16:03:50 | 2020-02-23T16:03:50 | 242,541,238 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,970 | r | 03_Datasize manipulation.R | ################################################
######## Code for datasize manipulation ########
################################################
# clear all
rm(list = ls())
# libraries
library(stringr)
library(tidyverse)
library(dplyr)
library(data.table)
library(stringdist)
# Import of rds files: pre-processed uni/bi/tri/quad-gram frequency tables.
# NOTE(review): absolute Windows paths make this script machine-specific;
# consider a relative data directory built with file.path().
uni_words <- readRDS(file = "C:/Projekte/Projects/R/Coursera/Week 10/rds-preprocessed/uni_words.rds")
bi_words <- readRDS(file = "C:/Projekte/Projects/R/Coursera/Week 10/rds-preprocessed/bi_words.rds")
tri_words <- readRDS(file = "C:/Projekte/Projects/R/Coursera/Week 10/rds-preprocessed/tri_words.rds")
quad_words <- readRDS(file = "C:/Projekte/Projects/R/Coursera/Week 10/rds-preprocessed/quad_words.rds")
# Transformation of ngrams: keep, per prefix group, only the single most
# frequent continuation (row index of the per-group maximum of n via
# data.table's .I), plus frequency-thresholded copies for the Levenshtein
# fallback tables. Each table is converted to data.table once; the original
# converted the same object twice on every line.
bi_words <- as.data.table(bi_words)
bi_words <- bi_words[bi_words[, .I[which.max(n)], by = word1]$V1]
leven_bi <- filter(bi_words, n > 1)
tri_words <- as.data.table(tri_words)
tri_words <- tri_words[tri_words[, .I[which.max(n)], by = c("word1", "word2")]$V1]
tri_words <- filter(tri_words, n > 1)
leven_tri <- filter(tri_words, n > 3)
quad_words <- as.data.table(quad_words)
quad_words <- quad_words[quad_words[, .I[which.max(n)], by = c("word1", "word2", "word3")]$V1]
quad_words <- filter(quad_words, n > 2)
leven_quad <- filter(quad_words, n > 10)
# save rds: persist the reduced n-gram tables and the Levenshtein tables
saveRDS(uni_words, file = "C:/Projekte/Projects/R/Coursera/Week 10/rds-final/uni_words.rds")
saveRDS(bi_words, file = "C:/Projekte/Projects/R/Coursera/Week 10/rds-final/bi_words.rds")
saveRDS(tri_words, file = "C:/Projekte/Projects/R/Coursera/Week 10/rds-final/tri_words.rds")
saveRDS(quad_words, file = "C:/Projekte/Projects/R/Coursera/Week 10/rds-final/quad_words.rds")
saveRDS(leven_bi, file = "C:/Projekte/Projects/R/Coursera/Week 10/rds-final/leven_bi.rds")
saveRDS(leven_tri, file = "C:/Projekte/Projects/R/Coursera/Week 10/rds-final/leven_tri.rds")
saveRDS(leven_quad, file = "C:/Projekte/Projects/R/Coursera/Week 10/rds-final/leven_quad.rds")
|
2acc11035419a940f646bfeee3ccc6a95401b97b | e77d4ec26cb8bbc6b81700dacac7545baa15bab7 | /data-code/9_plan_benefits-var_avaliability_combined.R | 4792739281efc6ec036d50369fff087f78f9e807 | [] | no_license | smonto2/Medicare-Advantage | 1b8d622d93e927a99da5a94283e5173f386a9634 | d8815c6a7d87681ffe7bf37038d69b2d70d9ebdf | refs/heads/master | 2023-09-05T10:10:48.456035 | 2023-01-11T13:54:52 | 2023-01-11T13:54:52 | 269,175,533 | 0 | 0 | null | 2020-06-03T19:24:44 | 2020-06-03T19:24:43 | null | UTF-8 | R | false | false | 1,524 | r | 9_plan_benefits-var_avaliability_combined.R | #' Combine availability of variables for all years and to show a whole picture
#'
#'
# Preliminaries -----------------------------------------------------------
if (!require("pacman")) install.packages("pacman")
pacman::p_load(readxl, tidyverse, data.table, readr)
## function
source("data-code/fn_plan_benefits_var_avaliability.R")
## Variables to change when new data comes in
start_year = 2008
end_year = 2019
end_q = 2
## Append all the data together
## Folder list
pre2017_folder_list <- paste0("PBP-Benefits-", start_year:2017) #Until 2017, yearly file
pos2017_folder_list <- do.call(paste0,
arrange_all(
expand.grid(paste0("PBP-Benefits-",2018:end_year),
paste0("-Q", seq(1:4)))
)
) #Since 2018, quarterly file
folder_list <- c(pre2017_folder_list, pos2017_folder_list)
if (end_q != 4){
folder_list <- head(folder_list, -(4-end_q)) #Adjust for latest available data
}
## combine all the data
for (i in folder_list){
print(paste(i,"is being processed ..."))
if(i == folder_list[1]){
ava_df = planb.var(i, step_up = FALSE)
} else {
col_yr = planb.var(i, step_up = FALSE)
ava_df = ava_df %>% full_join(col_yr, by = "var_list")
}
}
ava_df = ava_df %>%
mutate(percentage = round(rowSums(.[-1], na.rm = TRUE)/(ncol(.)-1),3))
## Exporting the result
write_tsv(ava_df,
file="data/data-out/ma-data/plan-benefits/planb.var_ava.tsv")
|
dd7911d941d0653be2dd8a5135ca7a541c5dc58e | 037894abbeeb4b06a30458c368f5ff19d35fb75e | /r/process_cal_main.r | a20b71c8cbce0abab0550b68b310de1638fc67ea | [] | no_license | andydawson/stepps-prediction | fb296f7d5684d654f3e3f3213496f3a54b6bb671 | 2d3691ec5bd5c287346bb46c1d0ca06d0ffe4cac | refs/heads/master | 2020-12-29T02:40:10.001567 | 2018-11-22T02:09:06 | 2018-11-22T02:09:06 | 26,365,568 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,333 | r | process_cal_main.r | # for some reason this code works with R-3.1.2 and not 3.2.0; fix later
library(Rcpp)
library(inline)
library(ggplot2)
library(rstan)
library(reshape)
library(fields)
source('r/utils/pred_plot_funs.r')
source('r/utils/pred_helper_funs.r')
source('r/utils/build_mu_g.r')
source('r/read_stanbin.r')
source('r/mugp.r')
# edit this file to process different runs
source('r/runs_cal_3by.r')
source('r/runs_cal_1by.r')
source('r/runs_cal_5by.r')
# source('data/comp_data_12taxa_mid_ALL_v0.3.rdata')
# Post-process every calibration run: rebuild the latent process and its
# mean from the posterior draws, write parameter summaries and ESS, then
# produce the trace plots and maps (all helpers come from the sourced files).
for (run in runs){
  # post_process_run(run)
  # gc()
  # suffixes naming this run's input dump, fitted model output, and figures
  suff_dat = run$suff_dat
  suff_fit = run$suff_fit
  suff_figs = run$suff_figs
  # source('r/pred_process_full_test.r')
  # where to put the figures
  subDir <- paste("figures/", suff_fit, sep='')
  create_figure_path(subDir)
  # load the data and posterior draws
  load(paste0('r/dump/', suff_dat, '.rdata'))
  post_dat = load_stan_output(suff_fit)
  # hack for now, fix later: drop the first 6 parameter names
  post_dat$par_names = post_dat$par_names[7:length(post_dat$par_names)]
  # N = nrow(d_inter)
  # N_knots = ncol(d_inter)
  # rebuild the composition process r and its mean surface mu_g from the
  # draws (N, T, K etc. are expected to come from the loaded dump -- confirm)
  process_out = build_r(post_dat, N, T, K)
  print('Built r')
  process_mean = build_mu_g_no_time(post_dat, rho, eta, T, K, d, d_inter, d_knots, od, mpp, mu0)
  print('Built mu_g')
  # for full model
  W = K-1
  N_pars = 3*(K-1) + 1
  write_par_vals(post_dat, taxa, subDir, N_pars)
  # compute ess and write to file (excluding the log_lik entries)
  sink(sprintf('%s/ess.txt', subDir), type='output')
  print('Effective samples sizes : ')
  ess_all = rowSums(sapply(post_dat$post[,1:N_pars], function(y) apply(y[,1,], 2, function(x) ess_rfun(x))))
  print(as.matrix(ess_all[!(sapply(strsplit(names(ess_all),'\\['), function(x) x[[1]]) == 'log_lik')]))
  sink()
  # trace_plot_pars(post, N_knots, T, N_pars, taxa=taxa, suff=suff, save_plots=save_plots)
  # trace_plot_mut(post, N_knots, T, N_pars, mean_type='other', suff=suff, save_plots=save_plots)
  #
  # ## fix this!!!
  # #trace_plot_knots(fit, N_knots, T, K, N_pars=N_pars, suff=suff, save_plots=save_plots)
  # # figure out nugget size
  # adj = get_corrections(post_dat, rho, eta, T, K, d_inter, d_knots)
  # adj_s = adj$adj_s
  # adj_t = adj$adj_t
  # t(apply(adj_s + adj_t, 2, function(x) quantile(x, probs=c(0.025, 0.5, 0.975))))
  # per-taxon quantiles of |g - mu_g|: how far the draws sit from their mean
  diff_g_mug = process_out$g - process_mean$mu_g
  summary_diff_g_mug = matrix(nrow=K-1, ncol=3)
  for (k in 1:(K-1)){
    summary_diff_g_mug[k,] = quantile(abs(as.vector(diff_g_mug[,k,])), probs=c(0.025, 0.5, 0.975))
  }
  summary_diff_g_mug
  ## ---- chunk: load processed output ----
  r_pred = process_out$r
  g = process_out$g
  mu_g = process_mean$mu_g
  mu = process_mean$mu
  Halpha_s = process_mean$Halpha_s
  r_mu_g = build_r_from_mu_g(mu_g, N, T, K)
  niter = dim(g)[3]
  # site-averaged spatial random effect: one trace per taxon
  mean_Halpha_s = array(NA, dim=c(W, niter))
  for (k in 1:W){
    mean_Halpha_s[k, ] = colSums(Halpha_s[,k,])/N
  }
  # trace plots of mu with posterior mean and 95% interval reference lines
  pdf(file=paste0(subDir, '/trace_mu.pdf'), width=8, height=12)
  par(mfrow=c(5,2))
  par(oma=c(0,0,2,0))
  for (k in 1:W){
    plot(mu[,k], type="l", ylab=paste0('mu[', k, ']'))
    abline(h=mean(mu[,k]), col="blue")
    abline(h=quantile(mu[,k],probs=0.025), col='blue', lty=2)
    abline(h=quantile(mu[,k],probs=0.975), col='blue', lty=2)
    # lines(mu_t[t+1,1,], col="blue")
    # plot(sum_Halpha_t[k,t,], type="l", ylab=paste0('sum_Halpha[', k, ',', t, ']'))
    # title(main=taxa[k], outer=TRUE)
  }
  dev.off()
  # same trace plots for the site-averaged spatial random effect
  pdf(file=paste0(subDir, '/trace_Halpha_s.pdf'), width=8, height=12)
  par(mfrow=c(5,2))
  par(oma=c(0,0,2,0))
  for (k in 1:W){
    plot(mean_Halpha_s[k,], type="l", ylab=paste0('mean_Halpha_s[', k, ']'))
    abline(h=mean(mean_Halpha_s[k,]), col="blue")
    abline(h=quantile(mean_Halpha_s[k,],probs=0.025), col='blue', lty=2)
    abline(h=quantile(mean_Halpha_s[k,],probs=0.975), col='blue', lty=2)
    # lines(mu_t[t+1,1,], col="blue")
    # plot(sum_Halpha_t[k,t,], type="l", ylab=paste0('sum_Halpha[', k, ',', t, ']'))
    # title(main=taxa[k], outer=TRUE)
  }
  dev.off()
  # trace_plot_process(mu_g, suff='mu_g', save_plots=save_plots)
  trace_plot_process(r_pred, suff='r', save_plots=save_plots)
  trace_plot_process(g, suff='g', save_plots=save_plots)
  # posterior means over iterations for each site/time row
  r_mean = matrix(NA, nrow=N*T, ncol=K)
  r_mu_g_mean = matrix(NA, nrow=N*T, ncol=K)
  g_mean = matrix(NA, nrow=N*T, ncol=W)
  mu_g_mean = matrix(NA, nrow=N*T, ncol=W)
  niter = dim(r_pred)[3]
  for (i in 1:(N*T)){
    r_mean[i,] = rowSums(r_pred[i,,])/niter
    r_mu_g_mean[i,] = rowSums(r_mu_g[i,,])/niter
    g_mean[i,] = rowSums(g[i,,])/niter
    mu_g_mean[i,] = rowSums(mu_g[i,,])/niter
  }
  print('Computed mean pred vals')
  limits = get_limits(centers_pls)
  ## ---- chunk: plot predicted distributions ----
  # suff1=paste(suff_fit, '_props', sep='')
  suff = paste0('props_', suff_figs)
  # plot_pred_maps(r_mean, centers_veg, taxa=taxa, ages, N, K, T, thresh=0.5, limits, type='prop', suff=suff1, save_plots=save_plots)
  # suff1.1=paste(suff_fit, '_props_select', sep='')
  plot_pred_maps_select(r_mean, centers_veg, taxa=taxa, ages, N, K, T, thresh=0.5, limits, type='prop', suff=suff, save_plots=save_plots)
  breaks = c(0, 0.01, 0.05, 0.10, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 1)
  # p_binned <- plot_pred_maps_binned(r_mean, centers_veg, breaks, taxa, ages, N, K, T, limits, suff=suff1, save_plots=save_plots)
  p_binned <- plot_pred_maps_binned_select(r_mean, centers_veg, breaks, taxa, ages, N, K, T, limits, suff=suff_figs, save_plots, fpath=subDir)
  print('Plotted predictions')
  ## ---- chunk: predicted process maps ----
  # suff2=paste(suff_fit, '_process', sep='')
  #
  # plot_pred_maps(g_mean, centers_veg, taxa=taxa, ages, N, K-1, T, thresh=NA, limits, type='process', suff=suff2, save_plots=save_plots)
  suff=paste0('process_', suff_figs)
  plot_pred_maps_select(g_mean, centers_veg, taxa=taxa, ages, N, K-1, T, thresh=NA, limits, type='process', suff=suff, save_plots=save_plots)
  suff=paste0('mug_', suff_figs)
  plot_pred_maps_select(r_mu_g_mean, centers_veg, taxa=taxa, ages, N, K-1, T, thresh=NA, limits, type='prop', suff=suff, save_plots=save_plots)
  print('Plotted process')
  ## ---- chunk: plot raw pls ----
  plot_data_maps(y_veg, centers=centers_pls, taxa=taxa, ages, N_pls, K, T, thresh=0.5, limits, suff=suff_figs, save_plots=save_plots)
  plot_data_maps_binned(y_veg, centers=centers_pls, taxa=taxa, ages, N_pls, K, T, breaks, limits, suff=suff_figs, save_plots=save_plots)
}
cdb4d9bc3ae78103a56af9774e1f319f476451a9 | 68908b60d07ec5b643cb65db1635d09c428519f8 | /dev/01-data-for-benchmarks.R | 16f2904582356ae6153dfc21c97cdd2702eb5a3c | [
"Apache-2.0"
] | permissive | pachadotdev/eflm | ed73ddb0e693fba150274737636e12413c6d6d50 | 7595a29aa1397902eb65a1cd51ced751495978d9 | refs/heads/main | 2023-05-23T07:12:49.253997 | 2023-05-16T15:10:53 | 2023-05-16T15:10:53 | 326,508,783 | 10 | 2 | NOASSERTION | 2023-05-16T15:10:54 | 2021-01-03T22:00:25 | R | UTF-8 | R | false | false | 1,619 | r | 01-data-for-benchmarks.R | library(yotover)
library(dplyr)
# Build the gravity-model benchmark sample from yotover's chapter 1,
# application 1 data, keeping every 4th year between 1986 and 2006.
ch1_application1_2 <- yotov_data("ch1_application1") %>%
  filter(year %in% seq(1986, 2006, 4))
# log transforms of bilateral trade flow and distance
ch1_application1_2 <- ch1_application1_2 %>%
  mutate(
    log_trade = log(trade),
    log_dist = log(dist)
  )
ch1_application1_2 <- ch1_application1_2 %>%
  # Create Yit: total exports of each exporter in each year
  group_by(exporter, year) %>%
  mutate(
    y = sum(trade),
    log_y = log(y)
  ) %>%
  # Create Eit: total expenditure (imports) of each importer in each year
  group_by(importer, year) %>%
  mutate(
    e = sum(trade),
    log_e = log(e)
  )
# Remoteness indices (presumably replicating the book's rem_exp / rem_imp
# variables -- confirm against the Yotov et al. guide)
ch1_application1_2 <- ch1_application1_2 %>%
  # Replicate total_e
  group_by(exporter, year) %>%
  mutate(total_e = sum(e)) %>%
  group_by(year) %>%
  mutate(total_e = max(total_e)) %>%
  # Replicate rem_exp
  group_by(exporter, year) %>%
  mutate(
    remoteness_exp = sum(dist * total_e / e),
    log_remoteness_exp = log(remoteness_exp)
  ) %>%
  # Replicate total_y
  group_by(importer, year) %>%
  mutate(total_y = sum(y)) %>%
  group_by(year) %>%
  mutate(total_y = max(total_y)) %>%
  # Replicate rem_imp
  group_by(importer, year) %>%
  mutate(
    remoteness_imp = sum(dist / (y / total_y)),
    log_remoteness_imp = log(remoteness_imp)
  )
ch1_application1_2 <- ch1_application1_2 %>%
  # This merges the columns exporter/importer with year (exporter-time and
  # importer-time fixed-effect identifiers)
  mutate(
    exp_year = paste0(exporter, year),
    imp_year = paste0(importer, year)
  )
# drop internal (domestic) flows and keep only the columns the benchmarks use
trade_data_yotov <- ch1_application1_2 %>%
  filter(exporter != importer)
trade_data_yotov <- trade_data_yotov %>%
  ungroup() %>%
  select(year, trade, dist, cntg, lang, clny, exp_year, imp_year)
saveRDS(trade_data_yotov, file = "dev/trade_data_yotov.rds", compress = "xz")
|
ba70912aa29cf51c2b310dd3fe457b667d8da891 | 75e26d5f9af95657f0f573c27e5607f497e19e63 | /man/named_vec_to_df.Rd | 987948d6dc7ea773cf4e79698e0cfd43fd820515 | [
"MIT"
] | permissive | andrepvieira/ddpcr | f2dfccdedae47e5372dc1ec0d22456bd8e02ba5a | b28b9a4c1e1850f4e74aef990a9f76e488956d13 | refs/heads/master | 2020-03-19T22:34:29.788463 | 2018-05-26T21:35:56 | 2018-05-26T21:35:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,050 | rd | named_vec_to_df.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{named_vec_to_df}
\alias{named_vec_to_df}
\title{Convert a named vector returned from vapply to a dataframe}
\usage{
named_vec_to_df(v, name, rowname = "well")
}
\arguments{
\item{v}{Named vector that is a result of a vapply}
\item{name}{Column name to use for the name of each element}
\item{rowname}{Column name to use for the values of the rownames}
}
\description{
When running a \code{vapply} function and each element returns a single value,
the return value is a named vector. This function can be used to convert
that return value into a data.frame. Similar to \code{\link[ddpcr]{lol_to_df}},
but because the output format from \code{vapply} is different depending on
whether a single value or multiple values are returned, a different function
needs to be used.
}
\examples{
vapply(c("a", "b", "c"),
toupper,
character(1)) \%>\%
named_vec_to_df("capital", "letter")
}
\seealso{
\code{\link[ddpcr]{lol_to_df}}
}
\keyword{internal}
|
daaa80c7b3968c2d93872a5d0a9fc94772cf36de | 158fd5a65a3f427a737664ee763c9574f16f7d7a | /modeling_code/Linear-Models/anovas.R | 29fda05ee58a957d3531ce1819c3c89b13e3a9de | [] | no_license | rachelsterneck/lm_1b | 20d133604a64371de00042ec966403195f8647be | 709a5260a5141f211b0b9e7bb9ef2efd70a2e45c | refs/heads/master | 2022-03-28T04:38:20.122416 | 2020-01-03T19:14:49 | 2020-01-03T19:14:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,036 | r | anovas.R | anova(lm.interp.curr.1, lm.interp.plus.rnn.curr.1)
anova(lm.rnn.2, lm.interp.plus.rnn.curr.1)
anova(lm.interp.curr.1, lm.interp.plus.5gram.curr.1)
anova(lm.klm5gram.2, lm.interp.plus.5gram.curr.1)
anova(lm.interp.prev.1, lm.interp.plus.rnn.prev.1)
anova(lm.rnn.3, lm.interp.plus.rnn.prev.1)
anova(lm.interp.prev.1, lm.interp.plus.5gram.prev.1)
anova(lm.klm5gram.3, lm.interp.plus.5gram.prev.1)
########## balanced interpolation
anova(lm.interp.balanced.curr.1, lm.interp.balanced.plus.rnn.curr.1)
anova(lm.rnn.2, lm.interp.balanced.plus.rnn.curr.1)
anova(lm.interp.balanced.curr.1, lm.interp.balanced.plus.5gram.curr.1)
anova(lm.klm5gram.2, lm.interp.balanced.plus.5gram.curr.1)
anova(lm.interp.balanced.curr.1, lm.interp.curr.1)
anova(lm.interp.balanced.prev.1, lm.interp.balanced.plus.rnn.prev.1)
anova(lm.rnn.3, lm.interp.balanced.plus.rnn.prev.1)
anova(lm.interp.balanced.prev.1, lm.interp.balanced.plus.5gram.prev.1)
anova(lm.klm5gram.3, lm.interp.balanced.plus.5gram.prev.1)
anova(lm.interp.balanced.prev.1, lm.interp.prev.1)
|
2a2863f7149f0c4cd4be7c823549406075aed874 | a02678f607a80458a78e571102a599eb791d1627 | /HSP/HSPkorelacje.R | 166821a6c6eb74477973863740b6cc8c9c55f0b6 | [] | no_license | geneticsMiNIng/HSP | 584f4cf9b8b9ea4ae58033bc25102b146b2e505b | 798799ec4c543c8a8ecd5ae7191b556f7cab5c35 | refs/heads/master | 2021-01-17T18:01:37.643154 | 2016-12-05T22:53:32 | 2016-12-05T22:53:32 | 59,332,996 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,103 | r | HSPkorelacje.R | library(shiny)
library(ggplot2)
library(survminer)
library(survival)
# Survival analysis of MDM2/TP53 status in TCGA cohorts.
# Match expression/mutation records to the tp53 table via the 12-character
# patient identifier (presumably a TCGA barcode prefix -- confirm).
load("dfll.rda")
dfll$pat <- substr(dfll$pat, 1, 12)
load("clinical_expression_mut.rda")
clinical_expression_mut2 <- merge(clinical_expression_mut, dfll, by.x="X_PATIENT", by.y="pat")
#table(clinical_expression_mut2$X_cohort, clinical_expression_mut2$tp53)
# tp53 == -1 is recoded as loss of heterozygosity; everything else as "-"
clinical_expression_mut2$tp53 <- ifelse(clinical_expression_mut2$tp53 == -1, "LOH", "-")
#df <- (clinical_expression_mut[, , drop=FALSE])
df <- (clinical_expression_mut2[, , drop=FALSE])
# drop the formalin-fixed cohorts
clinical_expression_mut <- clinical_expression_mut[!grepl(clinical_expression_mut$X_cohort, pattern = "Formalin"), ]
# interactive inspection of the two expression columns
clinical_expression_mut$MDM2
clinical_expression_mut$DNAJB1
# MDM2 vs DNAJB1 expression, one panel per cohort
ggplot(clinical_expression_mut, aes(MDM2, DNAJB1)) +
  geom_point() + facet_wrap(~X_cohort)
# restrict the survival analysis to the breast-cancer cohort
df <- (clinical_expression_mut2[clinical_expression_mut2$X_cohort == "TCGA Breast Cancer", , drop=FALSE])
hspgene = "MDM2"
# dichotomise expression at 0 (values appear centred -- confirm); the
# commented alternative below splits at the median instead
df$MDM2b <- cut(df[,"MDM2"], breaks = c(-100,0,100), labels = paste("MDM2",c("low", "high")), right=FALSE)
df$hsp <- cut(df[,hspgene], breaks = c(-100,0,100), labels = paste(hspgene,c("low", "high")), right=FALSE)
#df$MDM2b <- cut(df[,"MDM2"], breaks = c(-100,median(df[,"MDM2"], na.rm = TRUE),100), labels = paste("MDM2",c("low", "high")), right=FALSE)
#df$hsp <- cut(df[,hspgene], breaks = c(-100,median(df[,hspgene], na.rm = TRUE),100), labels = paste(hspgene,c("low", "high")), right=FALSE)
# X18475 == "1" is treated as TP53 mutated (column id -- confirm in codebook)
df$TP53 = ifelse(df$X18475 == "1", "TP53 mut", "TP53 wild")
df <- na.omit(df[,c("X_TIME_TO_EVENT", "X_EVENT", "TP53", "MDM2b", "hsp", "tp53")])
# combined stratum label: MDM2 level x TP53 status x hsp-gene level
df$g <- factor(paste(df$MDM2b, df$TP53, df$hsp))
table(df$g, df$tp53)
# Kaplan-Meier within the LOH subset: the "MDM2 high TP53 mut MDM2 high"
# stratum against all others
df2 <- df[df$tp53 == "LOH", ]
df2$g2 = factor(df2$g == "MDM2 high TP53 mut MDM2 high")
model <- survfit(Surv(X_TIME_TO_EVENT,X_EVENT) ~ g2, data=df2)
# p-value of the survdiff chi-square statistic between the two groups
pp <- pchisq(survdiff(Surv(X_TIME_TO_EVENT,X_EVENT) ~ g2, data=df2)$chisq, nlevels(df2$g2)-1, lower.tail = F)
ggsurvplot(model, xlim=c(0,3000), main=paste0("Low/High ",hspgene, "\n LOH p:", signif(pp,2)), # , "\ncases: ", nrow(df)
           legend="none",
           risk.table = TRUE, risk.table.y.text.col = TRUE)$plot
|
416e8810349d27dea3af205f11763d1924643bd2 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/sitar/examples/xaxsd.Rd.R | 43b032e9151193a55239e15e3ba503d3c8ccb134 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 469 | r | xaxsd.Rd.R | library(sitar)
### Name: xaxsd
### Title: Par args xaxs and yaxs option d
### Aliases: xaxsd yaxsd yaxsd
### ** Examples
## generate and plot 100 data points
x <- rnorm(100)
y <- rnorm(100)
plot(x, y, pch=19)
## generate and plot 10 more
## constraining axis scales to be as before
## (with no argument, xaxsd()/yaxsd() return the current plot's axis limits,
## so the new plot reuses the previous scales)
x <- rnorm(10)
y <- rnorm(10)
plot(x, y, pch=19, xlim=xaxsd(), ylim=yaxsd())
## force axis extremes to be -3 and 3
plot(x, y, pch=19, xlim=xaxsd(c(-3,3)), ylim=yaxsd(c(-3,3)))
|
2c7261fe93be2769c632355a631bcd86d9515b99 | 9034dfa4936f52dff9b31f2dee5515c848a3d9bb | /R/plot.longiPenal.R | 91236a54d3a13adb9f312141a69833b463bcd7ed | [] | no_license | cran/frailtypack | 41c6fa2c6a860c9b1e2feff84814e29d32b56091 | dfbdc53920a754f8529829f1f0fccc8718948cca | refs/heads/master | 2022-01-01T02:07:40.270666 | 2021-12-20T09:30:02 | 2021-12-20T09:30:02 | 17,696,141 | 9 | 4 | null | null | null | null | UTF-8 | R | false | false | 4,651 | r | plot.longiPenal.R | #' Plot Method for a joint model for longitudinal data and a terminal event.
#'
#' Plots estimated baseline survival and hazard functions for a terminal
#' outcome from an object of class 'longiPenal'. Confidence bands are allowed.
#'
#'
#' @aliases plot.longiPenal lines.longiPenal
#' @usage
#'
#' \method{plot}{longiPenal}(x, type.plot = "Hazard", conf.bands=TRUE,
#' pos.legend= "topright", cex.legend=0.7, main, color, median=TRUE, Xlab = "Time", Ylab =
#' "Hazard function", ...)
#' @param x A joint model for longitudinal outcome and a terminal event, i.e. a
#' \code{longiPenal} class object (output from calling \code{longiPenal}
#' function).
#' @param type.plot a character string specifying the type of curve for the
#' terminal event. Possible value are "Hazard", or "Survival". The default is
#' "Hazard". Only the first words are required, e.g "Haz", "Su"
#' @param conf.bands Logical value. Determines whether confidence bands will be
#' plotted. The default is to do so.
#' @param pos.legend The location of the legend can be specified by setting
#' this argument to a single keyword from the list '"bottomright"', '"bottom"',
#' '"bottomleft"', '"left"', '"topleft"', '"top"', '"topright"', '"right"' and
#' '"center"'. The default is '"topright"'
#' @param cex.legend character expansion factor *relative* to current
#' 'par("cex")'. Default is 0.7
#' @param main title of plot
#' @param color color of the curve (integer)
#' @param median Logical value. Determines whether survival median will be plotted. Default is TRUE.
#' @param Xlab Label of x-axis. Default is '"Time"'
#' @param Ylab Label of y-axis. Default is '"Hazard function"'
#' @param ... other unused arguments
#' @return Print a plot for the terminal event of the joint model for a
#' longitudinal and survival data.
#' @seealso \code{\link{longiPenal}}
#' @keywords file
##' @export
#' @examples
#'
#'
#' \dontrun{
#' ###--- Joint model for longitudinal data and a terminal event ---###
#'
#' data(colorectal)
#' data(colorectalLongi)
#'
#' # Survival data preparation - only terminal events
#' colorectalSurv <- subset(colorectal, new.lesions == 0)
#'
#' # Baseline hazard function approximated with splines
#' # Random effects as the link function
#'
#' model.spli.RE <- longiPenal(Surv(time1, state) ~ age + treatment + who.PS
#' + prev.resection, tumor.size ~ year * treatment + age + who.PS ,
#' colorectalSurv, data.Longi = colorectalLongi, random = c("1", "year"),
#' id = "id", link = "Random-effects", left.censoring = -3.33,
#' n.knots = 7, kappa = 2)
#' pdf(file = "/home/agareb1/etudiants/al10/newpack/test/plot_longi.pdf")
#'
#' # Plot the estimated baseline hazard function with the confidence intervals
#' plot(model.spli.RE)
#'
#' # Plot the estimated baseline hazard function with the confidence intervals
#' plot(model.spli.RE, type = "Survival")
#' }
#'
#'
"plot.longiPenal" <- function (x, type.plot="Hazard", conf.bands=TRUE, pos.legend="topright", cex.legend=0.7, main, color=2, median=TRUE, Xlab = "Time", Ylab = "Hazard function", ...)
{
plot.type <- charmatch(type.plot, c("Hazard", "Survival"),nomatch = 0)
if (plot.type == 0) {
stop("estimator must be Hazard or Survival")
}
if(missing(main))
main<-""
if(plot.type==1){ # hazard
if(conf.bands){
matplot(x$xD[-1,1], x$lamD[-1,,1], col=color, type="l", lty=c(1,2,2), xlab=Xlab,ylab=Ylab, main=main, ...)
}else{
plot(x$xD[-1,1], x$lamD[-1,1,1], col=color, type="l", lty=1, xlab=Xlab,ylab=Ylab, main=main, ...)
}
}else{ # survival
if (missing(Ylab)) Ylab <- "Baseline survival function"
if (x$typeof == 0){
if (conf.bands){
matplot(x$xD[,1], x$survD[,,1], col=color, type="l", lty=c(1,2,2), xlab=Xlab,ylab=Ylab, main=main, ...)
if (median){abline(a=0.5,b=0,cex=.5,col=1,lty=3)}
}else{
plot(x$xD[,1], x$survD[,1,1], col=color, type="l", lty=1, xlab=Xlab,ylab=Ylab, main=main, ...)
if (median){abline(a=0.5,b=0,cex=.5,col=1,lty=3)}
}
}else{
if (conf.bands){
matplot(x$xSuD[,1], x$survD[,,1], col=color, type="l", lty=c(1,2,2), xlab=Xlab,ylab=Ylab, main=main, ...)
if (median){abline(a=0.5,b=0,cex=.5,col=1,lty=3)}
}else{
plot(x$xSuD[,1], x$survD[,1,1], col=color, type="l", lty=1, xlab=Xlab,ylab=Ylab, main=main, ...)
if (median){abline(a=0.5,b=0,cex=.5,col=1,lty=3)}
}
}
}
legend(pos.legend, c("event"), lty=1, col=color, cex=cex.legend, ...)
return(invisible())
}
|
27ba62f5b3b9e371b7081a6bd329fbb7d299e6f6 | 956396a26bdc668c068d9d887d438bcde779f29c | /cachematrix.R | e059732d2578039999d49621fb70ec3dd3846159 | [] | no_license | karla-l/ProgrammingAssignment2 | 61e9966cb197ea5abd8c0cd12645ba615faa9f98 | f7d98b13a51c2c03abce8e1f49468390446c755b | refs/heads/master | 2021-01-18T09:15:49.972366 | 2015-02-22T12:45:55 | 2015-02-22T12:45:55 | 31,044,745 | 0 | 0 | null | 2015-02-20T00:36:05 | 2015-02-20T00:36:05 | null | UTF-8 | R | false | false | 1,634 | r | cachematrix.R | ## The following functions compute the inverse of a square matrix
## and cache the resultant using two functions:
##[1] makeCacheMatrix to create matrix object and store inverse
##[2] cacheSolve to compute matrix inverse and return cached inverse if already calculated
## The function makeCacheMAtrix creates a matrix object and
## uses getinverse and setinverse to store and return a cached version
## the matrix inverse.
## If the original matrix is changed, the cached
## inverse is invalid and passed to cacheSolve to be recalculated
# Build a cache-aware wrapper around a matrix. The returned list exposes
# get/set for the matrix itself and getinverse/setinverse for a cached
# inverse. The cache starts empty and is invalidated whenever set() replaces
# the underlying matrix.
makeCacheMatrix <- function(x = matrix()) {
        cached_inverse <- NULL
        set <- function(y) {
                x <<- y
                cached_inverse <<- NULL  # new matrix: cached inverse is stale
        }
        get <- function() {
                x
        }
        setinverse <- function(inverse) {
                cached_inverse <<- inverse
        }
        getinverse <- function() {
                cached_inverse
        }
        list(
                set = set,
                get = get,
                setinverse = setinverse,
                getinverse = getinverse
        )
}
## The function cacheSolve calculates and returns
##the inverse of the matrix passed in makeCacheMatrix.
## If the inverse has already been calculated
## the function returns message "Inverse already calculated" and
## output inverse stored in makeCacheMatrix.
## If the inverse has not yet been calculated or a new matrix input
## the function computes the inverse of the matrix using the solve function.
# Return a matrix that is the inverse of 'x' (an object produced by
# makeCacheMatrix). If the inverse is already cached, emit a message and
# return the cached value; otherwise compute it with solve(), store it in the
# cache, and return it. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
        cached <- x$getinverse()
        if (is.null(cached)) {
                cached <- solve(x$get(), ...)  # compute and memoise
                x$setinverse(cached)
        } else {
                message("Inverse already calculated")
        }
        cached
}
|
b1945c94473a0cddb3518402f2410caab3b14764 | a18c2a7cf79b96fd50d45dab7493a482d37eddb0 | /data/cellrangerRkit/man/get_matrix_dir_path.Rd | 0a31faee7f4222f4820be446e9a8c4829ac5d99e | [
"MIT"
] | permissive | buenrostrolab/10x_scRNAseq | b6514c07873ae2a9c8959498234958fb833db568 | 8e65ceffd8a7186d0c81b159e6b316bc2bfdc6bf | refs/heads/master | 2021-01-11T01:53:54.856901 | 2016-11-21T03:41:37 | 2016-11-21T03:41:37 | 70,646,869 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 581 | rd | get_matrix_dir_path.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.r
\name{get_matrix_dir_path}
\alias{get_matrix_dir_path}
\title{Get path to matrices in Cell Ranger output}
\usage{
get_matrix_dir_path(pipestance_path, barcode_filtered)
}
\arguments{
\item{pipestance_path}{Path to the output directory produced by the Cell Ranger pipeline}
\item{barcode_filtered}{If \code{TRUE}, return the path to the barcode-filtered matrices; otherwise, the path to the raw (unfiltered) matrices}
}
\value{
The path to the matrices directory
}
\description{
Get path to matrices in Cell Ranger output
}
|
c31ca54800ccf88dfbe84fc0db9c533879d751ed | 3163e89817ded391b753a1932421b96241756633 | /R/old_getORF.R | d35129e4776aacfed80aea8abd1d7fda5b900c77 | [
"Apache-2.0"
] | permissive | fursham-h/ponder | c1612c1e2dfc1dba64ddc75fb368be6e0538bfc2 | 5131a51c73fcf2a28fd43122f97b23932a59c4ca | refs/heads/master | 2022-03-27T11:16:34.321088 | 2019-12-08T23:31:26 | 2019-12-08T23:31:26 | 126,469,267 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,048 | r | old_getORF.R | #' _ workflow: Test transcript for NMD feature against a reference CDS
#'
#' @description
#' THIS FUNCTION IS PART OF THE _ PROGRAM WORKFLOW.
#' This function will compare the query transcript with a reference CDS and tests
#' if insertion of alternative segments into the CDS generates an NMD substrate
#'
#' @param knownCDS
#' @param queryTx
#' @param refsequence
#' @param gene_id
#' @param transcript_id
#'
#' @return df
getORFold <- function(knownCDS, queryTx, refsequence, gene_id, transcript_id) {
# prep output list
output = list(ORF_considered = as.character(NA),
ORF_start = as.character('Not found'),
ORF_found = FALSE)
# precheck for annotated start codon on query transcript and update output
pre_report = testTXforStart(queryTx, knownCDS, full.output=TRUE)
output = utils::modifyList(output, pre_report["ORF_start"])
# return if there is no shared exons between transcript and CDS
if (is.na(pre_report$txrevise_out[1])) {
return(output)
}
# attempt to reconstruct CDS for transcripts with unannotated start
if ((pre_report$ORF_start == 'Not found') |
(pre_report$ORF_start == 'Annotated' & pre_report$firstexlen < 3)) {
pre_report = reconstructCDSstart(queryTx, knownCDS,
refsequence,
pre_report$txrevise_out,
full.output = TRUE)
output = utils::modifyList(output, list(ORF_start = pre_report$ORF_start))
# return if CDS with new 5' do not contain a start codon
if (pre_report$ORF_start == 'Not found'){
return(output)
}
}
# reconstruct CDS with insertion of alternative segments
augmentedCDS = reconstructCDS(txrevise_out = pre_report$txrevise_out,
fasta = refsequence,
gene_id = gene_id,
transcript_id = transcript_id)
output = utils::modifyList(output, augmentedCDS)
return(output)
}
|
13095263eca287347e0b606983c02d526aef21bb | 17e7a6ec5b4632d89eab288d214d04daddb03b68 | /R/pumpTokens.R | 854e62815266bcd4112b3b8e59a8db2959194d52 | [] | no_license | choisteph/cholera | 0d99ca082761558bee3d308fd95ba0e7f8e8f4ae | 57c2c3bcf1de7f7b606e41a00bb09ab769e4222d | refs/heads/master | 2022-11-23T06:05:05.734258 | 2020-07-30T15:36:10 | 2020-07-30T15:36:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,209 | r | pumpTokens.R | #' Add pump tokens to plot.
#'
#' @noRd
pumpTokens <- function(pump.select, vestry, case.set, snow.colors, type) {
if (vestry) {
dat <- cholera::pumps.vestry
} else {
dat <- cholera::pumps
}
if (case.set == "observed") {
if (is.null(pump.select)) {
points(dat[, c("x", "y")], pch = 24, lwd = 1.25, col = snow.colors)
text(dat[, c("x", "y")], pos = 1, cex = 0.9, labels = paste0("p", dat$id))
} else {
if (all(pump.select > 0)) {
sel <- dat$id %in% pump.select
} else if (all(pump.select < 0)) {
sel <- dat$id %in% abs(pump.select) == FALSE
}
points(dat[sel, c("x", "y")], pch = 24, lwd = 1.25,
col = snow.colors[sel])
text(dat[sel, c("x", "y")], pos = 1, cex = 0.9,
labels = paste0("p", dat$id[sel]))
}
} else if (case.set == "expected") {
if (type %in% c("road", "star")) {
if (is.null(pump.select)) {
points(dat[, c("x", "y")], pch = 24, lwd = 1.25, bg = snow.colors)
text(dat[, c("x", "y")], pos = 1, cex = 0.9,
labels = paste0("p", dat$id))
} else {
if (all(pump.select > 0)) {
sel <- dat$id %in% pump.select
} else if (all(pump.select < 0)) {
sel <- dat$id %in% abs(pump.select) == FALSE
}
points(dat[sel, c("x", "y")], pch = 24, lwd = 1.25,
bg = snow.colors[sel])
text(dat[sel, c("x", "y")], pos = 1, cex = 0.9,
labels = paste0("p", dat$id[sel]))
}
} else if (type %in% c("area.points", "area.polygons")) {
if (is.null(pump.select)) {
points(dat[, c("x", "y")], pch = 24, lwd = 1.25,
col = "white", bg = snow.colors)
text(dat[, c("x", "y")], pos = 1, cex = 0.9,
labels = paste0("p", dat$id))
} else {
if (all(pump.select > 0)) {
sel <- dat$id %in% pump.select
} else if (all(pump.select < 0)) {
sel <- dat$id %in% abs(pump.select) == FALSE
}
points(dat[sel, c("x", "y")], pch = 24, lwd = 1.25, col = "white",
bg = snow.colors[sel])
text(dat[sel, c("x", "y")], pos = 1, cex = 0.9,
labels = paste0("p", dat$id[sel]))
}
}
}
}
|
6a41b556efff4c1a35fdb6d996be476c5dfda49b | f6c871a87f64a5ca6d8dcf6931e05e6dc0fb0c1f | /R-Package/man/ClusterCalc.Rd | 918315e0f1f257d180d5c2cd6454df523b565c2a | [] | no_license | CreanzaLab/SongEvolutionModel | 073342a07eaeca061f92b6798b2ab1ff40567b58 | 449087c85280e11b97924575d2e3c618169a0f8b | refs/heads/master | 2020-06-13T18:23:13.603932 | 2019-08-15T22:57:51 | 2019-08-15T22:57:51 | 194,747,721 | 0 | 1 | null | 2019-08-15T22:57:52 | 2019-07-01T21:52:31 | R | UTF-8 | R | false | true | 555 | rd | ClusterCalc.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Source_StatsPlotsandTesting.R
\name{ClusterCalc}
\alias{ClusterCalc}
\title{Cluster Calculation}
\usage{
ClusterCalc(P, matrix)
}
\arguments{
\item{P}{a list of parameters}
\item{matrix}{a saved trait from the Basic sims (requires individual data)}
}
\description{
Calculates the cluster score of a matrix.
}
\seealso{
Other Cluster Plots: \code{\link{ClusterPlot}},
\code{\link{GetMaxMat}}, \code{\link{QuickClusterPlot}}
}
\concept{Cluster Plots}
\keyword{stats-plotting}
|
f52a7205081b3ea936e70d270961e9a6d4f9a8a3 | 55e617b353f211fe206109c1b80a527e98f4cd76 | /man/predict.MTL.Rd | 53d9216f4b95bd0711e81e4dc230b9852d720c8e | [] | no_license | hank9cao/RMTL | fae8cf3f644f929248b81654c9749f691c7e7e7f | 50564c7bd26dc673fd1f2604206db7394b755afd | refs/heads/master | 2020-04-26T18:09:05.656661 | 2019-04-16T11:30:18 | 2019-04-16T11:30:18 | 131,158,971 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 891 | rd | predict.MTL.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MTL.R
\name{predict.MTL}
\alias{predict.MTL}
\title{Predict the outcomes of new individuals}
\usage{
\method{predict}{MTL}(object, newX = NULL, ...)
}
\arguments{
\item{object}{A trained MTL model}
\item{newX}{The feature matrices of new individuals}
\item{...}{Other parameters}
}
\value{
The predictive outcome
}
\description{
Predict the outcomes of new individuals. For classification, the
probability of the individual being assigned to the positive label, P(y==1),
is estimated; for regression, the prediction score is estimated.
}
\examples{
#Create data
data<-Create_simulated_data(Regularization="L21", type="Regression")
#Train
model<-MTL(data$X, data$Y, type="Regression", Regularization="L21",
Lam1=0.1, Lam2=0, opts=list(init=0, tol=10^-6, maxIter=1500))
predict(model, newX=data$tX)
}
|
ad11abae08c4fe90797c00f856debd25e7fec1b8 | dc3642ea21337063e725441e3a6a719aa9906484 | /DevInit/R/is_in_use.R | 87fec9bab26ae5329317e7db7199b5a681a04a12 | [] | no_license | akmiller01/alexm-util | 9bbcf613384fe9eefd49e26b0c841819b6c0e1a5 | 440198b9811dcc62c3eb531db95abef8dbd2cbc7 | refs/heads/master | 2021-01-18T01:51:53.120742 | 2020-09-03T15:55:13 | 2020-09-03T15:55:13 | 23,363,946 | 0 | 7 | null | null | null | null | UTF-8 | R | false | false | 1,564 | r | is_in_use.R | wd <- "C:/git/alexm-util/DevInit/R"
setwd(wd)
# Strip the last four characters (the ".csv" extension) from each file name.
# Vectorised over 'str'. NOTE(review): this shadows base::trunc within this
# script; callers here only apply it to file-name strings.
trunc <- function(str){
  substr(str, 1, nchar(str) - 4)
}
# Concept metadata: a concept "has a map theme" when map.theme is non-NA.
concepts <- read.csv("C:/git/digital-platform/concepts.csv",na.strings="",as.is=TRUE)
concepts <- transform(
  concepts
  ,hasMapTheme = !is.na(map.theme)
)
keep <- c("id","hasMapTheme")
concepts <- concepts[keep]
# Indicator IDs are the CSV file names (minus ".csv") in the two data folders.
countryYear <- list.files("C:/git/digital-platform/country-year", pattern="*.csv")
countryYear <- data.frame(sapply(countryYear,trunc),stringsAsFactors=FALSE)
names(countryYear) <- "id"
countryYear$reference <- FALSE
refs <- list.files("C:/git/digital-platform/reference", pattern="*.csv")
refs <- data.frame(sapply(refs,trunc),stringsAsFactors=FALSE)
names(refs) <- "id"
refs$reference <- TRUE
allFiles <- rbind(countryYear,refs)
# Search the site's bundled JavaScript for each indicator id.
code <- readLines("C:/git/di-website/wp/wp-content/themes/diConcept/dist/app.js")
used <- logical(nrow(allFiles))
for(i in 1:nrow(allFiles)){
  fileName <- allFiles[i,1]
  # max(grepl(...)) is 1 if any code line mentions the id, else 0.
  # NOTE(review): assigning this integer coerces 'used' from logical to
  # integer; downstream `used | hasMapTheme` still works.
  inCode <- max(grepl(fileName,code))
  used[i] <- inCode
}
allFiles$used <- used
# Full outer join so concepts without files (and files without concepts)
# are kept; fill the resulting NAs with "not used"/"no theme" defaults.
final <- merge(
  allFiles
  ,concepts
  ,by="id"
  ,all=TRUE
)
final[which(is.na(final$used)),]$used <- 0
final[which(is.na(final$hasMapTheme)),]$hasMapTheme <- FALSE
final[which(is.na(final$reference)),]$reference <- FALSE
# An indicator is "in ddh" if it appears in the code or has a map theme.
final <- transform(
  final
  ,in_ddh=used | hasMapTheme
)
keep <- c("id","reference","in_ddh")
final <- final[keep]
#We know these are used, even if they're not in the code
final[which(grepl("oda-donor",final$id)),]$in_ddh <- TRUE
write.csv(final,"in_code_check.csv",na="",row.names=FALSE)
|
2c72bff1f855dc8255bc8697ea12234cb807d428 | ef4d07aed5c6976a2982c01e540f08bd8bab3c5d | /plot1.R | 2cc1d7732b2e11ee9226f73e89f7f8c7579478b9 | [] | no_license | Brufico/ExData_Plotting1 | 2c2d89b2199f023af54d9695819e799053be83a7 | 07bd74afd93341e431641aea927175631a8b802e | refs/heads/master | 2021-01-21T02:41:58.015753 | 2016-01-10T00:33:15 | 2016-01-10T00:33:15 | 49,151,738 | 0 | 0 | null | 2016-01-06T17:53:25 | 2016-01-06T17:53:25 | null | UTF-8 | R | false | false | 2,802 | r | plot1.R | # =======================================================
# Exploratory Data Analysis, Assignment 1
# ========================================================
# libraries
library(lubridate)
# Changing temporarily the "LC_TIME" locale
# save
curloctime <- Sys.getlocale("LC_TIME")
# set
Sys.setlocale("LC_TIME", "English")
### Data handling =======================================
# data location in the work directory ==================
dir <- "exdata_data_household_power_consumption"
fname <- "household_power_consumption.txt"
fpath <- file.path(".",dir,fname)
# selectively read rows from the file ========================
# source of the method: Stackoverflow:
# http://stackoverflow.com/questions/6592219/read-csv-from-specific-row
# read 1st row and get the number of columns
dta_row1 <- read.table(fpath, header = TRUE,
sep=";", na.strings = "?", nrow = 1)
nc <- ncol(dta_row1)
# read now all rows, but skip all columns but the first (a bit slow)
message("Reading first column of a large data file - may take a while...")
# reading. if the class of a column is "NULL", the column is skipped
dta <- read.table(fpath, header = TRUE, sep=";",
as.is = TRUE, na.strings = "?",
colClasses = c(NA, rep("NULL", nc - 1)))
message("computing read.table parameters...")
# make a column of date-times from the strings in col 1.
# (Surprisingly, this is fast enough).
dta$dt <- dmy(dta$Date)
# indices of beginning and end of useful data rows
begin <- ymd("2007-02-01")
end <- ymd("2007-02-02")
nb <- which.max(dta$dt >= begin)
ne <- which.max(dta$dt > end)
# number of rows to read
okrows <- ne - nb
message("Reading relevant data...")
# effectively read the relevant rows
dta <- read.table(fpath, header = TRUE,
sep=";",
skip = nb - 1,
nrows = okrows,
colClasses = c("character",
"character",
rep("numeric", nc - 2) ))
# re-establish variable names lost because of 'skip=...'
# (in lowercase)
colnames(dta) <- tolower(names(dta_row1))
# combine dates & times in column "dt"
dta$dt <- dmy(dta$date) + hms(dta$time)
message("Data ready. Commencing plotting")
### Plotting ===========================================
# Plot 1 ================================================
message("Plot 1 ...")
# Open png device
png(filename = "plot1.png",
width = 480, height = 480)
# create plot
hist(dta$global_active_power,
col="red",
main = "Global Active Power",
ylab = "Frequency",
xlab = "Global Active Power (kilowatts)" )
# close png device
dev.off()
message("Plot 1 done")
# restore time settings
Sys.setlocale("LC_TIME", curloctime)
|
7d18c0a1bc207db20d655e8b130672900d2208a4 | ac1e775f1b359e9854a3e76770f4e1966a20d872 | /src/Filter_Me.R | 7f97e991b91f79283b8c4494d5a585bb7606671a | [] | no_license | matiasandina/USV-Interpolation-CleanUp | 27ef16aa1ff3d34cee89bad51ab84a519bc7964d | ca32b142443006337492e5084897f6a3adae54f0 | refs/heads/master | 2021-09-09T09:50:38.543114 | 2018-03-14T20:29:46 | 2018-03-14T20:29:46 | 109,130,185 | 0 | 0 | null | 2018-02-01T17:46:29 | 2017-11-01T12:43:06 | R | UTF-8 | R | false | false | 5,519 | r | Filter_Me.R | Filter_Me <- function(dataframe,
columns,
end.positions,
please.filter = FALSE,
na.threshold = 55){
## Fix Human error in duration #####
# General Impossible Values
# Duration cannont be less than 2 ms (or 0.002 sec)
# Duration cannot be more than 300 ms
# We want to keep x between 0.002 & 0.4
#dataframe <- dplyr::filter(dataframe, duration>=0.002 & duration<0.4)
#got rid of duration < 0.4 because some labels containing many overlapping
#calls will be longer than this and don't want to lose those
dataframe <- dplyr::filter(dataframe, duration>=0.002)
# This is only useful if you need to subset numeric columns
# aka duration
fix_human <- function(x, lower.bound, upper.bound){
boolean <- logical(length = length(x))
boolean <- ifelse((is.na(x) | (x>lower.bound & x<upper.bound)),
FALSE,
TRUE)
myfix <- ifelse(boolean==T, NA, x)
return(myfix)
}
# 1) Anything below 15000 should go to NA
# 2) Anything above 120000 should go to NA
columns <- enquo(columns)
dataframe <- dataframe %>% mutate_at(.vars = vars(!!columns),
.funs=function(q) fix_human(q,15000,120000))
# These old calls are way easier but have troubles
# dataframe[dataframe < 15000] <- NA THIS LINE WILL GET RID OF DURATION AND WE DONT WANT THAT
# dataframe[dataframe > 120000] <- NA THIS LINE WILL GET RID OF DURATION AND WE DONT WANT THAT
huge.lista <- list()
huge.counter <- 1
for(myend in 1:length(end.positions)){
print(paste0("Trying end.positions value equals ", myend))
# Subset the signal
signal <- dataframe %>%
mutate(ID=1:nrow(dataframe)) %>%
group_by(ID) %>%
select(!!columns)
# We add one to account for ID column in position 1 of data.frame signal
start.chunk <- c(1:myend) + 1
# We subtract the end.positions - 1 from the length to have the last end.positions value from it
# e.g, 51 - end.positions when end.positions=2 will give 49 and 49:51 will give one extra
# we do 51 - (end.positions - 1) so that it will give 50:51
end.chunk <- c((length(signal) - (myend - 1)):length(signal))
ends.to.remove <- c(start.chunk, end.chunk)
# Remove ends
signal <- signal %>% select(-ends.to.remove)
na.frame <- data.frame(is.na(signal))
# Empty list to store the curve
lista <- list()
j <- 1
for(i in seq(10,100,5)){
i <- as.numeric(i)
print(i)
temp <- na.frame %>% mutate(miss = rowSums(.[2:length(na.frame)]),
total= ncol(na.frame)-1,
prop = round(miss/total,3)*100,
accepted = ifelse(prop <= i, "accepted", "rejected"))
temp
number.of.calls <- temp %>% count(accepted) %>% mutate(na.max=i)
lista[[j]] <- number.of.calls
j <- j + 1
print("loop is over, next j")
}
A <- bind_rows(lista) %>% mutate(End.Trim = myend)
# Source helper function
# if(!exists("plot_coverage", mode="function")) source("plot_coverage.R")
huge.lista[[huge.counter]] <- A
huge.counter <- huge.counter + 1
print("Huge loop over, be happy :)")
}
HUGE <- bind_rows(huge.lista)
p <- ggplot(HUGE, aes(na.max, n, group=End.Trim, color=End.Trim)) + geom_line() + facet_wrap(~accepted)
print(p)
# Make summary
print("Here's a summary of the coverage")
#na.max <- enquo(na.max)
#End.Trim <- enquo(End.Trim)
#accepted <- enquo(accepted)
#n <- enquo(n)
summ.cov <- HUGE %>%
group_by(na.max, End.Trim) %>%
mutate(TOTAL=sum(n)) %>%
mutate(percent=n/TOTAL*100) %>%
filter(accepted=="accepted") %>%
mutate(penalty=percent/(na.max*End.Trim)) %>%
arrange(desc(penalty)) # penalty penalizes you for increasing na.max and End.Trim to gain percent
print(summ.cov)
if(please.filter){
if(length(end.positions)>1){
stop("choose only 1 value of end.positions if you want to filter")
}
# Make a table with proportion of accepted, the end.positions that yield that proportion
# and ask to filter
# alberto
print(paste("na.threshold is", na.threshold, ". Do you want to change it?" ))
change.it <- toupper(readline("Y/N>> "))
if(change.it=="Y"){
ask <- readline("Choose your percent (0-100): >")
# If ask is outside boundaries, function has to break
if(as.numeric(ask)<=0 | as.numeric(ask)>=100){
stop("Input has to be a number between 0 and 100. Run function again.")
} else {
na.threshold <- as.numeric(ask)
}
}
# We kinda settled for na.max=55 and End.Trim=1
# run the temp call with the value assigned
temp <- na.frame %>% mutate(miss = rowSums(.[2:length(na.frame)]),
total= ncol(na.frame)-1,
prop = round(miss/total,3)*100,
accepted = ifelse(prop <= na.threshold, "accepted", "rejected"))
# Create ID to store the original information of the your's original data order
temp$ID <- 1:nrow(dataframe)
filtered.data <- dataframe[which(temp$accepted=="accepted"),]
return(filtered.data)
}
return(HUGE)
}
|
cdb6cf414ee65571a8fef0bc9ed84b5ec9d5f3f0 | 77bc65a8c09300842fcdc0e0a70a1d6da9191fb7 | /f - barplot_perc_participants_by_occupation.R | 5936ac68025bebc87b333e714ea29788a4133e17 | [] | no_license | vynguyen92/nhanes_occupational_exposures | 07ad223cda8e6d323e26b535eb84188fc6bba5d6 | ecaec09b6172136c8b9b17e1dce06b7eaa86d88b | refs/heads/main | 2023-08-21T02:04:14.130352 | 2021-10-22T20:05:44 | 2021-10-22T20:05:44 | 420,227,537 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 14,582 | r | f - barplot_perc_participants_by_occupation.R | barplot_perc_participants_by_occupation <- function(dataset_merged
, include_sector_collar_dataset
, occupation_group
, name_of_folder
, current_directory
, analysis_type)
{
library("tidyverse")
library("gridExtra")
library("grid")
library("RColorBrewer")
# Determine all file names in the current working directory
all_files_in_current_directory <- list.files()
# Make a new folder if the folder doesn't exist
if(name_of_folder %in% all_files_in_current_directory)
{
} else {
dir.create(name_of_folder)
}
# Define a string for the working directory for the new folder
new_working_directory <- paste(current_directory
, name_of_folder
, sep = "/")
# Set the working directory to this new folder
setwd(new_working_directory)
vector_sector_collar_exclude <- include_sector_collar_dataset %>%
filter(include == "no") %>%
pull(all_of(occupation_group))
# print(vector_sector_collar_exclude)
dataset_merged <- dataset_merged %>%
mutate(race = factor(case_when(RIDRETH1 == "_mexican_americans" ~ "Mexican Americans"
, RIDRETH1 == "_other_hispanics" ~ "Other Hispanics"
, RIDRETH1 == "_whites" ~ "Non-Hispanic Whites"
, RIDRETH1 == "_blacks" ~ "Non-Hispanic Blacks"
, RIDRETH1 == "_other_race" ~ "Other Race/Multi-Racial" )
, levels = rev(c("Non-Hispanic Whites"
, "Non-Hispanic Blacks"
, "Mexican Americans"
, "Other Hispanics"
, "Other Race/Multi-Racial" )))) %>%
mutate(sex = factor(if_else(RIAGENDR == "_males", "Males", "Females"))) %>%
mutate(age = RIDAGEYR) %>%
mutate(age_group = factor(case_when(RIDAGEYR < 16 ~ "[0,16)"
, RIDAGEYR >= 16 & RIDAGEYR <= 18 ~ "[16,18]"
, RIDAGEYR > 18 & RIDAGEYR <= 28 ~ "(18,28]"
, RIDAGEYR > 28 & RIDAGEYR <= 38 ~ "(28,38]"
, RIDAGEYR > 38 & RIDAGEYR <= 48 ~ "(38,48]"
, RIDAGEYR > 48 & RIDAGEYR <= 58 ~ "(48,58]"
, RIDAGEYR > 58 & RIDAGEYR <= 68 ~ "(58,68]"
, RIDAGEYR > 68 & RIDAGEYR <= 78 ~ "(68,78]"
, RIDAGEYR > 78 ~ "(78,85]")
, levels = c("[0,16)"
, "[16,18]"
, "(18,28]"
, "(28,38]"
, "(38,48]"
, "(48,58]"
, "(58,68]"
, "(68,78]"
, "(78,85]"))) %>%
mutate(poverty_income_ratio = factor(case_when(INDFMPIR <= 1 ~ "[0,1]"
, INDFMPIR > 1 & INDFMPIR <= 2 ~ "(1,2]"
, INDFMPIR > 2 & INDFMPIR <= 3 ~ "(2,3]"
, INDFMPIR > 3 & INDFMPIR <= 4 ~ "(3,4]"
, INDFMPIR > 4 & INDFMPIR <= 5 ~ "(4,5]")
, levels = c("[0,1]"
, "(1,2]"
, "(2,3]"
, "(3,4]"
, "(4,5]"))) %>%
mutate(smoking = factor(case_when(LBXCOT <= 1 ~ "no smoking"
, LBXCOT > 1 & LBXCOT <= 3 ~ "secondhand smoke"
, LBXCOT > 3 ~ "active smoking")
, levels = c("no smoking"
, "secondhand smoke"
, "active smoking"))) %>%
mutate(cycle = case_when(SDDSRVYR == 1 ~ "1999-2000"
, SDDSRVYR == 2 ~ "2001-2002"
, SDDSRVYR == 3 ~ "2003-2004"
, SDDSRVYR == 4 ~ "2005-2006"
, SDDSRVYR == 5 ~ "2007-2008"
, SDDSRVYR == 6 ~ "2009-2010"
, SDDSRVYR == 7 ~ "2011-2012"
, SDDSRVYR == 8 ~ "2013-2014")) %>%
dplyr::select(all_of(occupation_group)
, "race"
, "age"
, "age_group"
, "sex"
, "poverty_income_ratio"
, "smoking"
, "cycle"
, "WTMECADJ"
, "WTINTADJ") %>%
mutate(occupation_group = "NHANES population")
# print(str(dataset_merged))
dataset_merged_16_up <- dataset_merged %>%
filter(age >= 16) %>%
mutate(occupation_group = "NHANES 16+ population")
subset_merged <- dataset_merged %>%
filter(!(!!rlang::sym(occupation_group) %in% vector_sector_collar_exclude)) %>%
mutate(occupation_group = !!rlang::sym(occupation_group))
all_merged <- full_join(dataset_merged
, subset_merged
, by = colnames(dataset_merged)) %>%
full_join(.
, dataset_merged_16_up
, by = colnames(.)) %>%
filter(!is.na(occupation_group))
covariates <- c("age_group"
, "sex"
, "race"
, "poverty_income_ratio"
, "smoking"
, "cycle"
)
ref_of_covariates <- list("age_group" = c("[0,16)", "[16,18]", "(18,28]")
, "sex" = "Males"
, "race" = "Non-Hispanic Whites"
, "poverty_income_ratio" = "[0,1]"
, "smoking" = "active smoking"
, "cycle" = c("1999-2000", "2001-2002")
)
num_covariates <- length(covariates)
list_perc_plots <- list()
# One iteration per demographic covariate: compute, for every occupation
# group, the percentage breakdown of that covariate's levels, draw it as a
# horizontal stacked bar chart, save the chart as png/pdf, and keep a
# letter-labelled grob so a combined panel can be assembled after the loop.
for(i in seq(num_covariates))
{
covariate_i <- covariates[i]
print(covariate_i)
# Columns to group by in this iteration: occupation plus the covariate.
occupation_and_covariate <- c("occupation_group", covariate_i)
if(analysis_type == "unweighted")
{
# Unweighted analysis: raw participant counts.
# Denominator — participants per occupation group.  Rows with a missing
# covariate value are dropped first, so numerator and denominator are
# computed on the same subset of rows.
num_participants_by_occupation <- all_merged %>%
select(all_of(occupation_and_covariate)) %>%
na.omit(.) %>%
group_by_at("occupation_group") %>%
summarise(num_participants = n()) %>%
ungroup(.) %>%
data.frame(.)
# View(num_participants_by_occupation)
# Numerator — participants per (occupation group, covariate level) cell.
num_participants_by_occupation_covariate_i <- all_merged %>%
select(all_of(occupation_and_covariate)) %>%
na.omit(.) %>%
group_by_at(vars(one_of(occupation_and_covariate))) %>%
summarise(num_participants = n()) %>%
ungroup(.) %>%
data.frame(.)
# View(num_participants_by_occupation_covariate_i)
} else if(analysis_type == "weighted") {
# Weighted analysis: same two tables, but summing the MEC survey weight
# (WTMECADJ) instead of counting rows.
num_participants_by_occupation <- all_merged %>%
select(all_of(c(occupation_and_covariate, "WTMECADJ"))) %>%
na.omit(.) %>%
group_by_at("occupation_group") %>%
summarise(num_participants = sum(WTMECADJ)) %>%
ungroup(.) %>%
data.frame(.)
num_participants_by_occupation_covariate_i <- all_merged %>%
select(all_of(c(occupation_and_covariate, "WTMECADJ"))) %>%
na.omit(.) %>%
group_by_at(vars(one_of(occupation_and_covariate))) %>%
summarise(num_participants = sum(WTMECADJ)) %>%
ungroup(.) %>%
data.frame(.)
# View(num_participants_by_occupation_covariate_i)
}
# NOTE(review): if analysis_type is neither "unweighted" nor "weighted",
# neither summary table is defined and the join below errors out.
# Percentage of each occupation group falling in each covariate level:
# cell value (num_participants.x) over the occupation total
# (num_participants.y), times 100.
percentage_df <- num_participants_by_occupation_covariate_i %>%
left_join(.
          , num_participants_by_occupation
          , by = "occupation_group") %>%
mutate(percentage = num_participants.x/num_participants.y*100)
# View(percentage_df)
# Reference level(s) for this covariate; they determine the y-axis order
# of the occupation bars.
ref_of_covariate_i <- ref_of_covariates[[covariate_i]]
# print(ref_of_covariate_i)
if(covariate_i %in% c("age_group", "cycle"))
{
# These covariates have several reference levels, so sum their
# percentages per occupation before ordering.
ordered_occupation <- percentage_df %>%
filter(!!rlang::sym(covariate_i) %in% ref_of_covariate_i) %>%
group_by_at("occupation_group") %>%
summarise(percentage = sum(percentage)) %>%
ungroup(.) %>%
arrange(percentage) %>%
pull("occupation_group")
} else {
# Single reference level: order occupations directly by its percentage.
ordered_occupation <- percentage_df %>%
filter(!!rlang::sym(covariate_i) %in% ref_of_covariate_i) %>%
arrange(percentage) %>%
pull("occupation_group")
}
# print(ordered_occupation)
# View(num_participants_by_occupation_covariate_i)
# Make the occupation column a factor whose level order controls the
# top-to-bottom order of bars in the plot.
num_participants_by_occupation_covariate_i[,"occupation_group"] <- factor(num_participants_by_occupation_covariate_i[,"occupation_group"]
                                                                          , levels = ordered_occupation)
# Human-readable facet label, e.g. "age_group" -> "Age Group".
covariate_name_i <- gsub("_", " ", covariate_i) %>%
str_to_title
num_participants_by_occupation_covariate_i <- num_participants_by_occupation_covariate_i %>%
mutate(covariate_name = covariate_name_i)
# View(num_participants_by_occupation_covariate_i)
# print(ordered_occupation)
# Axis-label colors keyed to occupation category: blue-collar groups in
# blue, white-collar in black, the NHANES reference populations in gold,
# and not-working categories in grey.
# NOTE(review): occupations matching none of these patterns get an NA
# color from case_when — confirm every level of ordered_occupation is
# covered.
color_category <- case_when(grepl("Blue", ordered_occupation) == TRUE ~ "#0096FF"
                            , grepl("White", ordered_occupation) == TRUE ~ "black"
                            , grepl("NHANES", ordered_occupation) == TRUE ~ "#B8860B"
                            , ordered_occupation %in% c("Going to school"
                                                        , "Occupation Missing"
                                                        , "Unable to work for health reasons"
                                                        , "Retired"
                                                        , "Looking for work"
                                                        , "On layoff"
                                                        , "Disabled"
                                                        , "Taking care of house or family") ~ "#A9A9A9")
# Horizontal stacked bar chart: one bar per occupation, filled by the
# covariate's levels; position = "fill" normalizes each bar to 100%.
# NOTE(review): aes() references the data-frame column directly
# (df[,"occupation_group"]) rather than the bare column name — it works
# but is fragile; the y-axis title is suppressed in theme() anyway.
barplot_num_occupation_covariate <- ggplot(data = num_participants_by_occupation_covariate_i
                                           , aes(x = num_participants
                                                 , y = num_participants_by_occupation_covariate_i[,"occupation_group"]
                                                 , fill = !!rlang::sym(covariate_i))) +
geom_bar(position = "fill"
         , stat = "identity") +
facet_grid(cols = vars(covariate_name)) +
scale_x_continuous(labels = scales::percent_format()) +
scale_fill_brewer(name = element_blank()
                  , palette = "RdYlBu") +
guides(fill = guide_legend(nrow = 1)) +
theme(legend.position = "top"
      , legend.text = element_text(size = 11)
      , axis.title.y = element_blank()
      , axis.title.x = element_blank()
      , axis.text.x = element_text(size = 14)
      , axis.text.y = element_text(colour = color_category)
      , strip.text = element_text(size = 14))
# Extract a letter to label the stairway plot
letter_i <- LETTERS[i]
# Define a title label of the letter and the position of that title for the physiological indicator
title_i <- textGrob(label = letter_i
                    , x = unit(0.5, "lines")
                    , y = unit(0, "lines")
                    , hjust = 0
                    , vjust = 0
                    , gp = gpar(fontsize = 20
                                , fontface = "bold"))
# Per-covariate output file names,
# e.g. "barplots_percentages_sex_weighted.png".
plot_name.png <- paste("barplots_percentages_"
                       , covariate_i
                       , "_"
                       , analysis_type
                       , ".png"
                       , sep = "")
plot_name.pdf <- paste("barplots_percentages_"
                       , covariate_i
                       , "_"
                       , analysis_type
                       , ".pdf"
                       , sep = "")
# Save the individual covariate plot in both formats.
ggsave(filename = plot_name.png
       , plot = barplot_num_occupation_covariate
       , width = 14
       , height = 9)
ggsave(filename = plot_name.pdf
       , plot = barplot_num_occupation_covariate
       , width = 14
       , height = 9)
# Store the letter-titled grob for the combined panel built after the loop.
list_perc_plots[[i]] <- arrangeGrob(barplot_num_occupation_covariate
                                    , top = title_i)
}
# Lay the per-covariate plots (already letter-labelled) out in a two-column
# panel.  grid.arrange also draws the panel on the active graphics device.
panel_perc_plots <- do.call(grid.arrange, c(list_perc_plots, ncol = 2))

# Add a single shared, bold x-axis caption underneath the whole panel.
panel_perc_plots <- arrangeGrob(
  panel_perc_plots,
  bottom = textGrob("Percentages (%)",
                    gp = gpar(fontface = "bold",
                              cex = 1.5))
)

# Output file names for the combined panel, tagged with the analysis type
# ("weighted"/"unweighted").
plot_name.png <- paste0("barplots_percentages_demographics_", analysis_type, ".png")
plot_name.pdf <- paste0("barplots_percentages_demographics_", analysis_type, ".pdf")

# Save the panel in both formats, echoing each file name as it is written.
for (panel_file in c(plot_name.png, plot_name.pdf)) {
  print(panel_file)
  ggsave(filename = panel_file,
         plot = panel_perc_plots,
         width = 25,
         height = 20)
}
# Set the directory to the folder containing the function and main scripts
# NOTE(review): `current_directory` is defined outside this chunk —
# presumably captured before the working directory was changed to the
# output folder; confirm against the caller.
setwd(current_directory)
# barplot_num_occupation_covariate
} |
2576a7068feca4953b32dc488681d05513d8439c | e336ef502f2406ed1a77ee12f16217848dc6c27a | /old_scripts/3.4_clustering_nearest_neighbour_empirical.R | dd3930b7b0b78d9229f6687efee2ce02b5d0f4ec | [] | no_license | ksamuk/genome_meta_scripts | 66ead7e467e262a7af4e43e8dc4ed88a3dab3b7b | 6a4d22eac0656f85a1ddefd941874e594c8f54d0 | refs/heads/master | 2021-01-15T23:02:17.648431 | 2015-11-29T19:49:03 | 2015-11-29T19:49:03 | 31,740,964 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,912 | r | 3.4_clustering_nearest_neighbour_empirical.R | # calculate nearest neighbour distances
library(data.table)
library(dplyr)
library(parallel)
library(ggplot2)
# read in snp file
snp.file <- "stats/snp_filtered/stats_master_variant_2015-06-11.filt.txt"
snp.file <- fread(snp.file)
# remove weird outliers
snp.file[!is.na(snp.file$fst) & snp.file$fst<0, ]$fst <- NA
# find outliers
is.outlier <- function(x){
x95 <- quantile(x, na.rm = TRUE, probs = 0.95)[1]
return(x >=x95)
}
snp.file$fst <- as.numeric(snp.file$fst)
snp.file<-snp.file%>%
group_by(study_com)%>%
mutate(fst.outlier = is.outlier(fst)) %>%
ungroup
str(snp.file)
# calculate nn dist
nndist.lg <- function (lg.df) {
site.sample <- lg.df %>%
arrange(gen.pos) %>%
select(gen.pos) %>%
mutate(dist.1 = c(NA,diff(gen.pos))) %>%
mutate(dist.2 = c(diff(sort(gen.pos)),NA))
nn.dist <- rep(NA, length(site.sample$genpos))
for (i in 1:length(site.sample$gen.pos)){
if(!is.na(site.sample$dist.1[i]) & !is.na(site.sample$dist.2[i])){
nn.dist[i] <- min(c(site.sample$dist.1[i],site.sample$dist.2[i]))
}else if(is.na(site.sample$dist.1[i])){
nn.dist[i] <- site.sample$dist.2[i]
} else if(is.na(site.sample$dist.2[i])){
nn.dist[i] <- site.sample$dist.1[i]
}
}
return(mean(nn.dist))
}
for (i in unqiue(snp.file$study_com)){
snp.sub <- subset(snp.file)
}
split.df <- split(snp.file, snp.file$study_com)
null.distances <- mclapply(split.df, permute_distances_snp_list, num.samples = 10000, mc.cores = 6, mc.silent = FALSE)
null.distances <- do.call("rbind", null.distances)
rownames(null.distances) <- NULL
write.table(null.distances, file="null_snp_distances.txt", row.names = FALSE)
# GRAB EMMPIRICAL NNDs
add.geo <- function (x) {
geography<-"parapatric.d"
if(grepl("allopatricD",x)==TRUE){
geography<-"allopatric.d"
}
if(grepl("allopatricS",x)==TRUE){
geography<-"allopatric.s"
}
if(grepl("parapatricS",x)==TRUE){
geography<-"parapatric.s"
}
if(grepl("japan",x)==TRUE){
geography<-"parapatric.d"
}
return(geography)
}
nndist.df<- read.table("test.distances.txt",stringsAsFactors = FALSE,header=TRUE)
nndist.df$geography <- sapply(nndist.df$study_com, add.geo)
melt(nndist, id.vars = c("fst.outlier"))
nndist.df <- nndist.df %>%
filter(!is.na(fst.outlier))
nn.true <- subset(nndist.df, nndist.df$fst.outlier ==TRUE) %>% select(study_com,lg,nndist)
names(nn.true)[3]<-"nn.true"
nn.false <- subset(nndist.df, nndist.df$fst.outlier ==FALSE) %>% select(study_com,lg,nndist)
names(nn.false)[3]<-"nn.false"
nn.matched <- left_join(nn.true,nn.false)
nn.matched$diff <- nn.matched$nn.false/nn.matched$nn.true
nn.matched$geography <- sapply(nn.matched$study_com, add.geo)
nn.matched %>%
filter(diff < 100) %>%
ggplot(aes(x = study_com, y = log(diff+1), fill = geography))+
geom_boxplot()+
facet_grid(~geography, scales = "free_x")
|
8bcba8ff22ac0bb1c7733f93ca1c6b08b77eed57 | 27e2240acae4c110b4a853f67eaabfb317d8ec93 | /lab/tidyverse/lab02_zadania_ek.R | 3b88eab87b8e423e1ff9d2ae1800e67d149bf0bf | [] | no_license | pbiecek/AdvancedR2018 | 97dbf5fc71d650b8b3f3097f5d82ccb01adc3043 | 26bfd79c0c9fd8fd95185a9afe210e9f7a9d4627 | refs/heads/master | 2018-09-19T10:54:58.171788 | 2018-06-13T10:44:23 | 2018-06-13T10:44:23 | 125,171,416 | 9 | 28 | null | 2018-06-13T10:44:24 | 2018-03-14T07:17:46 | HTML | UTF-8 | R | false | false | 5,268 | r | lab02_zadania_ek.R | ########################################### ZADANIA ######################################################
library(PogromcyDanych)
library(dplyr)
library(tidyr)
head(auta2012)
# 1. Która Marka występuje najczęściej w zbiorze danych auta2012?
auta2012 %>% group_by(Marka) %>% summarise(n=n()) %>% filter(n==max(n))
# 2. Spośród aut marki Toyota, który model występuje najczęściej.
auta2012 %>% filter(Marka=="Toyota") %>% group_by(Model) %>% summarise(n=n()) %>% filter(n==max(n))
# 3. Sprawdź ile jest aut z silnikiem diesla wyprodukowanych w 2007 roku?
auta2012 %>% filter(Rodzaj.paliwa=="olej napedowy (diesel)", Rok.produkcji == 2007) %>% nrow()
# 4. Jakiego koloru auta mają najmniejszy medianowy przebieg?
auta2012 %>% group_by(Kolor) %>% summarise(mediana = median(Przebieg.w.km, na.rm=TRUE)) %>%
arrange(mediana) %>% head(1)
# 5. Gdy ograniczyć się tylko do aut wyprodukowanych w 2007,
# która Marka występuje najczęściej w zbiorze danych auta2012?
auta2012 %>% filter(Rok.produkcji == 2007) %>% group_by(Marka) %>% summarise(n=n()) %>% filter(n==max(n))
# 6. Spośród aut marki Toyota, który model najbardziej stracił na cenie pomiędzy rokiem produkcji 2007 a 2008.
auta2012 %>% filter(Marka == "Toyota", Rok.produkcji %in% c(2007,2008)) %>% group_by(Model,Rok.produkcji) %>%
summarise(sr_cena = mean(Cena.w.PLN, na.rm=TRUE)) %>% spread(Rok.produkcji, sr_cena) %>%
mutate(roznica = `2008` - `2007`) %>% ungroup %>% filter(roznica == min(roznica, na.rm=TRUE))
# 7. Spośród aut z silnikiem diesla wyprodukowanych w 2007 roku która marka jest najdroższa?
auta2012 %>% filter(Rodzaj.paliwa=="olej napedowy (diesel)", Rok.produkcji == 2007) %>%
group_by(Marka) %>% summarise(sr_cena = mean(Cena.w.PLN, na.rm=TRUE)) %>% filter(sr_cena==max(sr_cena))
# 8. Ile jest aut z klimatyzacją?
auta2012 %>% select(Wyposazenie.dodatkowe) %>% filter(grepl("klimatyzacja", Wyposazenie.dodatkowe)) %>% nrow()
# 9. Gdy ograniczyć się tylko do aut z silnikiem ponad 100 KM,
# która Marka występuje najczęściej w zbiorze danych auta2012?
auta2012 %>% filter(KM > 100) %>% group_by(Marka) %>% summarise(n=n()) %>% filter(n==max(n))
# 10. Spośród aut marki Toyota, który model ma największą różnicę cen gdy porównać silniki benzynowe a diesel?
auta2012 %>% filter(Marka == "Toyota", Rodzaj.paliwa %in% c("benzyna", "olej napedowy (diesel)")) %>% group_by(Model,Rodzaj.paliwa) %>%
summarise(sr_cena = mean(Cena.w.PLN, na.rm=TRUE)) %>% spread(Rodzaj.paliwa, sr_cena) %>%
mutate(roznica = benzyna - `olej napedowy (diesel)`) %>% ungroup %>% filter(roznica == max(abs(roznica), na.rm=TRUE))
# 11. Spośród aut z silnikiem diesla wyprodukowanych w 2007 roku która marka jest najtańsza?
auta2012 %>% filter(Rodzaj.paliwa=="olej napedowy (diesel)", Rok.produkcji == 2007) %>%
group_by(Marka) %>% summarise(sr_cena = mean(Cena.w.PLN, na.rm=TRUE)) %>% filter(sr_cena==min(sr_cena))
# 12. W jakiej marce klimatyzacja jest najczęściej obecna?
auta2012 %>% filter(grepl("klimatyzacja", Wyposazenie.dodatkowe)) %>% group_by(Marka) %>% summarise(n = n()) %>% filter(n == max(n))
# 13. Gdy ograniczyć się tylko do aut o cenie ponad 50 000 PLN, która Marka występuje najczęściej w zbiorze danych auta2012?
auta2012 %>% filter(Cena.w.PLN>50000) %>% group_by(Marka) %>% summarise(n=n()) %>% filter(n==max(n))
# 14. Spośród aut marki Toyota, który model ma największy medianowy przebieg?
auta2012 %>%
filter(Marka == "Toyota") %>%
group_by(Model) %>% summarise(mediana = median(Przebieg.w.km, na.rm=TRUE)) %>%
arrange(desc(mediana)) %>% head(1)
# 15. Spośród aut z silnikiem diesla wyprodukowanych w 2007 roku który model jest najdroższy?
auta2012 %>%
filter(Rodzaj.paliwa=="olej napedowy (diesel)", Rok.produkcji == 2007) %>%
group_by(Model) %>% summarise(sr_cena = mean(Cena.w.PLN, na.rm=TRUE)) %>% filter(sr_cena==max(sr_cena))
# 16. W jakim modelu klimatyzacja jest najczęściej obecna?
auta2012 %>% filter(grepl("klimatyzacja", Wyposazenie.dodatkowe)) %>% group_by(Model) %>% summarise(n = n()) %>% filter(n == max(n))
# 17. Gdy ograniczyć się tylko do aut o przebiegu poniżej 50 000 km o silniku diesla,
# która Marka występuje najczęściej w zbiorze danych auta2012?
auta2012 %>% filter(Przebieg.w.km<50000, Rodzaj.paliwa=="olej napedowy (diesel)") %>%
group_by(Marka) %>% summarise(n=n()) %>% filter(n==max(n))
# 18. Spośród aut marki Toyota wyprodukowanych w 2007 roku, który model jest średnio najdroższy?
auta2012 %>% filter(Marka=="Toyota", Rok.produkcji==2007) %>% group_by(Model) %>% summarise(sr_cena = mean(Cena.w.PLN, na.rm=TRUE)) %>%
ungroup %>% filter(sr_cena==max(sr_cena))
# 19. Spośród aut z silnikiem diesla wyprodukowanych w 2007 roku który model jest najtańszy?
auta2012 %>% filter(Rok.produkcji==2007, Rodzaj.paliwa=="olej napedowy (diesel)") %>%
group_by(Model) %>% summarise(sr_cena = mean(Cena.w.PLN, na.rm=TRUE)) %>% filter(sr_cena==min(sr_cena))
# 20. Jakiego koloru auta mają największy medianowy przebieg?
auta2012 %>% group_by(Kolor) %>% summarise(mediana = median(Przebieg.w.km, na.rm=TRUE)) %>%
arrange(desc(mediana)) %>% head(1)
|
9cf4974afaff4f6027b38c03c2a34a0238edd8b7 | 5d57f98cdaba0494a0a11b3b99a2adbfaeee59c8 | /PBMCs_PreProcessing.R | 9d891d663f1072206e9f724ddb551fcb0bae7a9b | [] | no_license | yingstat/scDatasets | 3674f8f262583870ff0d4e0f3e510f2ff0bc6d41 | b5970dec038d3097b12bd7cae0c281cff3dad33f | refs/heads/main | 2023-01-25T00:51:48.556846 | 2020-12-06T01:45:42 | 2020-12-06T01:45:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,832 | r | PBMCs_PreProcessing.R | # Estimated purity:
# monocytes: 98%
# B cells: 100%
# CD34+: 45%
# CD4+ Helper T cells: 99%
# CD4+CD25+ T-reg: 95%
# CD4+CD45RA+/CD25- Naive T cells: 98%
# CD4+/CD45RO+ Memory T cells: 98%
# CD56+ Natural Killer Cells: 92%
# CD8+ Cytotoxic T cells: 98%
# CD8+/CD45RA+ Naive Cytotoxic T cells: 99%
set.seed(134)
arg_list <- c("cd14_monocytes_filtered_gene_bc_matrices.tar.gz",
"b_cells_filtered_gene_bc_matrices.tar.gz",
# "cd34_filtered_gene_bc_matrices.tar.gz",
"cd4_t_helper_filtered_gene_bc_matrices.tar.gz",
"regulatory_t_filtered_gene_bc_matrices.tar.gz",
"naive_t_filtered_gene_bc_matrices.tar.gz",
"memory_t_filtered_gene_bc_matrices.tar.gz",
"cd56_nk_filtered_gene_bc_matrices.tar.gz",
"cytotoxic_t_filtered_gene_bc_matrices.tar.gz",
"naive_cytotoxic_filtered_gene_bc_matrices.tar.gz")
Reference_types <- c("monocyte", "b_cell", "t_helper", "t_reg", "t_naive", "t_mem", "nk", "t_cyto", "t_naive_cyto")
Profiles <- list();
Subset <- list();
for (i in 1:length(arg_list)) {
#args <- commandArgs(trailingOnly=TRUE)
system(paste("tar -xvzf ",arg_list[i]));
files <- c("filtered_matrices_mex/hg19/matrix.mtx","filtered_matrices_mex/hg19/barcodes.tsv", "filtered_matrices_mex/hg19/genes.tsv")
counts <- read.delim(files[1], stringsAsFactors=FALSE, sep=" ", header=FALSE)
barcode <- read.delim(files[2], stringsAsFactors=FALSE, header=FALSE)
genes <- read.delim(files[3], stringsAsFactors=FALSE, header=FALSE)
comments <- grep("%", counts[,1])
counts <- counts[-comments,]
counts <- counts[!is.na(counts[,1]) & !is.na(counts[,2]) & !is.na(counts[,3]),]
#counts[,3] <- log2(as.numeric(counts[,3]) +1) # for randomforest
require("Matrix")
mat <- sparseMatrix(i=as.numeric(counts[,1]), j=as.numeric(counts[,2]), x = as.numeric(counts[,3]), dimnames = list(genes[,2], barcode[,1]))
Data <- list(counts = mat, type = Reference_types[i])
parsed_filename <- strsplit(arg_list[i], "filtered")
saveRDS(Data, file=paste(parsed_filename[[1]][1], "dataset.rds", sep=""))
Profiles[[i]] <- rowMeans(mat)
exclude <- unique(sort(c(grep("^RPS",rownames(mat)), grep("^RPL", rownames(mat)), grep("^MT-", rownames(mat)), which(Profiles[[i]] < 0.1), which(rownames(mat)=="AC002321.1"))))
require("proxy")
res <- proxy::simil(t(Profiles[[i]][-exclude]), t(as.matrix(mat[-exclude,])), method="cosine")
keep <- which(res > quantile(res, probs=0.9))
set.seed(141)
keep <- runif(n=ncol(mat)) < 0.1; # Keep 10% sample
Subset[[i]] <- mat[,keep]
}
P_Mat <- cbind(Profiles[[1]],Profiles[[2]], Profiles[[3]], Profiles[[4]], Profiles[[5]], Profiles[[6]], Profiles[[7]], Profiles[[8]], Profiles[[9]])
#colnames(P_Mat) <- c("Mono", "B cells", "T-help", "T-reg", "T-naive", "T-mem", "NK", "T-cyto", "T-naive-cyto")
colnames(P_Mat) <- Reference_types
sf <- colSums(P_Mat)
P_Mat <- t(t(P_Mat)/sf*median(sf))
P_Mat <- log2(P_Mat+1)
P_max <- apply(P_Mat, 1, max)
P_min <- apply(P_Mat, 1, min)
saveRDS(P_Mat, file="reference_logmat.rds")
Features <- P_max/P_min > 2 & P_max > 0.1
# T-cell Features
T_mat <- P_Mat[,-c(1,2,7)]
Features2 <- apply(T_mat,1,max)/apply(T_mat,1,min) & P_max > 0.1
cosineDist <- function(x){
as.dist(1 - x%*%t(x)/(sqrt(rowSums(x^2) %*% t(rowSums(x^2)))))
}
cosineSim <- function(x){
x%*%t(x)/(sqrt(rowSums(x^2) %*% t(rowSums(x^2))))
}
# Fancy Feature selection (Balanced)
Potential <- P_max > 0.05
Pot_Mat <- P_Mat[Potential,]
score <- P_max[Potential]/P_min[Potential]
ID <- apply(P_Mat, 1, function(x){which(x==max(x))[1]})
ID <- ID[Potential]
reorder <- order(-score)
ID <- ID[reorder]
Features3 <- vector();
for (i in 1:ncol(P_Mat)) {
Features3 <- c(Features3,names(ID[ID==i])[1:200])
}
Features3 <- rownames(P_Mat) %in% Features3
# Check Assignments
Assigned <- matrix(0, ncol=length(arg_list), nrow=length(arg_list))
for(i in 1:length(arg_list)) {
system(paste("tar -xvzf ",arg_list[i]));
files <- c("filtered_matrices_mex/hg19/matrix.mtx","filtered_matrices_mex/hg19/barcodes.tsv", "filtered_matrices_mex/hg19/genes.tsv")
counts <- read.delim(files[1], stringsAsFactors=FALSE, sep=" ", header=FALSE)
barcode <- read.delim(files[2], stringsAsFactors=FALSE, header=FALSE)
genes <- read.delim(files[3], stringsAsFactors=FALSE, header=FALSE)
comments <- grep("%", counts[,1])
counts <- counts[-comments,]
counts <- counts[!is.na(counts[,1]) & !is.na(counts[,2]) & !is.na(counts[,3]),]
require("Matrix")
mat <- sparseMatrix(i=as.numeric(counts[,1]), j=as.numeric(counts[,2]), x = as.numeric(counts[,3]), dimnames = list(genes[,2], barcode[,1]))
require("proxy")
res <- proxy::simil(t(P_Mat[Features,]), t(as.matrix(mat[Features,])), method="cosine")
correct <- res[i,] == apply(res,2,max)
assign <- apply(res,2,function(x){if(max(x) > 0.7){which(x == max(x))[1]} else{NA}})
factor_counts <- function(vec) {
x <- split(seq(length(vec)), vec)
result <- sapply(x, function(a) length(a))
return(result)
}
Assigned[i,] <- factor_counts(factor(assign, levels=1:9))
}
# RandomForest
test <- cbind(as.matrix(Subset[[1]]), as.matrix(Subset[[2]]), as.matrix(Subset[[3]]), as.matrix(Subset[[4]]), as.matrix(Subset[[5]]), as.matrix(Subset[[6]]), as.matrix(Subset[[7]]), as.matrix(Subset[[8]]), as.matrix(Subset[[9]]))
test_lab <- rep( c("Mono", "B cells", "T-help", "T-reg", "T-naive", "T-mem", "NK", "T-cyto", "T-naive-cyto"), times=c(ncol(Subset[[1]]), ncol(Subset[[2]]), ncol(Subset[[3]]), ncol(Subset[[4]]), ncol(Subset[[5]]), ncol(Subset[[6]]), ncol(Subset[[7]]), ncol(Subset[[8]]), ncol(Subset[[9]])))
require("randomForest")
res <- randomForest(t(test), factor(test_lab), ntree=50, keep.forest=TRUE) # SLoW
# still 30-60% error rate for T-cell sub-types
Features_names <- rownames(P_Mat)[Features]
### Assign Data using Reference ###
to_assign <- c("fresh_68k_pbmc_donor_a_filtered_gene_bc_matrices.tar.gz",
"frozen_pbmc_donor_a_filtered_gene_bc_matrices.tar.gz",
"frozen_pbmc_donor_b_filtered_gene_bc_matrices.tar.gz",
"frozen_pbmc_donor_c_filtered_gene_bc_matrices.tar.gz",
"pbmc33k_filtered_gene_bc_matrices.tar.gz",
"pbmc3k_filtered_gene_bc_matrices.tar.gz",
"pbmc4k_filtered_gene_bc_matrices.tar.gz",
"pbmc6k_filtered_gene_bc_matrices.tar.gz",
"pbmc8k_filtered_gene_bc_matrices.tar.gz")
Assigned_new <- matrix(0, ncol=length(arg_list), nrow=length(to_assign))
for (i in 1:length(to_assign)) {
system(paste("tar -xvzf ", to_assign[i], "> tmp.txt"));
files <- as.vector(read.table("tmp.txt")[,1])
mat_file <- files[grep("matrix.mtx", files)]
bar_file <- files[grep("barcodes", files)]
gen_file <- files[grep("genes", files)]
counts <- read.delim(mat_file, stringsAsFactors=FALSE, sep=" ", header=FALSE)
barcode <- read.delim(bar_file, stringsAsFactors=FALSE, header=FALSE)
genes <- read.delim(gen_file, stringsAsFactors=FALSE, header=FALSE)
comments <- grep("%", counts[,1])
counts <- counts[-comments,]
counts <- counts[!is.na(counts[,1]) & !is.na(counts[,2]) & !is.na(counts[,3]),]
require("Matrix")
mat <- sparseMatrix(i=as.numeric(counts[,1]), j=as.numeric(counts[,2]), x = as.numeric(counts[,3]), dimnames = list(genes[,2], barcode[,1]))
mat <- mat[rownames(mat) %in% rownames(P_Mat),]
P_Mat_tmp <- P_Mat[match(rownames(mat), rownames(P_Mat)),]
# Get Cluster data
parsed_filename <- strsplit(to_assign[i], "filtered")
clust_file <- paste(parsed_filename[[1]][1], "analysis_k10.csv.gz", sep="")
comp_clust <- read.table(clust_file, stringsAsFactors=FALSE, header=TRUE, sep=",")
# Assign labels by reference
require("proxy")
res <- proxy::simil(t(P_Mat_tmp[rownames(P_Mat_tmp) %in% Features_names,]),
t(as.matrix(mat[rownames(mat) %in% Features_names,])), method="cosine")
correct <- res[i,] == apply(res,2,max)
assign <- apply(res,2,function(x){if(max(x) > 0.7){which(x == max(x))[1]} else{NA}})
Data <- list(counts = mat, clusters = comp_clust[,2], assigned = Reference_types[assign])
saveRDS(Data, file=paste(parsed_filename[[1]][1], "dataset.rds", sep=""))
Assigned_new[i,] <- factor_counts(factor(assign, levels=1:9))
}
args <- commandArgs(trailingOnly=TRUE)
args <- c("filtered_gene_bc_matrices/hg19/matrix.mtx","filtered_gene_bc_matrices/hg19/barcodes.tsv","filtered_gene_bc_matrices/hg19/genes.tsv", "pbmc3k_analysis_k10.csv.gz", "out")
counts <- read.delim(args[1], stringsAsFactors=FALSE, sep=" ", header=FALSE)
barcode <- read.delim(args[2], stringsAsFactors=FALSE, header=FALSE)
genes <- read.delim(args[3], stringsAsFactors=FALSE, header=FALSE)
clust <- read.delim(args[4], header=TRUE, sep=",", stringsAsFactors=FALSE)
out <- args[5];
comments <- grep("%", counts[,1])
counts <- counts[-comments,]
counts <- counts[!is.na(counts[,1]) & !is.na(counts[,2]) & !is.na(counts[,3]),]
require("Matrix")
mat <- sparseMatrix(i=as.numeric(counts[,1]), j=as.numeric(counts[,2]), x = as.numeric(counts[,3]), dimnames = list(genes[,1], barcode[,1]))
require("CellTypeProfiles")
# auto_QC
Ts <- colSums(mat)
Ds <- colSums(mat > 0)
tot <- quantile(Ts, probs=c(0.25, 0.5, 0.75))
fea <- quantile(Ds, probs=c(0.25, 0.5, 0.75))
#outliers1 <- abs(Ts - tot[2]) > 3.5*(mean(tot[2]-tot[1], tot[3]-tot[2]))
#outliers2 <- abs(Ds - fea[2]) > 3.5*(mean(fea[2]-fea[1], fea[3]-fea[2]))
outliers1 <- abs(Ts - tot[2]) > (tot[2]-min(Ts))
outliers2 <- abs(Ds - fea[2]) > (fea[2]-min(Ds))
mat <- mat[, !(outliers1 | outliers2)]
clust <- clust[!(outliers1 | outliers2),]
clust_sizes <- factor_counts(clust[,2])
exclude <- names(clust_sizes)[clust_sizes<=1]
mat <- mat[,!clust[,2] %in% exclude]
clust <- clust[!clust[,2] %in% exclude,]
profiles <- CellTypeProfiles::get_cluster_profile(mat, clust[,2], feature_selection=m3drop.features, norm_method="CPM")
saveRDS(profiles, file=paste(out,"_profile.rds", sep=""))
saveRDS(mat, file=paste(out,"_mat.rds", sep=""))
|
95f73ed9d76e98f8287422f25783538c2fc3b27a | 1110578c35c464ef8cd91e59253b4e6ee3a9fa4f | /R/startup.R | c941e7a36f5b1903d9b286b24ad013c3316722ee | [
"MIT"
] | permissive | akram-mohammed/rplos | 1c39e3c2da55edb44eb0a781dbf613cf025e0ac8 | e7b7bdfddc100aabb6e60ca1cd13dedd623f7c64 | refs/heads/master | 2021-01-18T10:32:52.517548 | 2015-03-07T16:30:05 | 2015-03-07T16:30:05 | 32,147,318 | 1 | 0 | null | 2015-03-13T09:31:16 | 2015-03-13T09:31:14 | R | UTF-8 | R | false | false | 227 | r | startup.R | .onAttach <- function(...) {
packageStartupMessage("\n\n New to rplos? Tutorial at http://ropensci.org/tutorials/rplos_tutorial.html. Use suppressPackageStartupMessages() to suppress these startup messages in the future\n")
} |
e330246a93e704b48800f49bf55bf357bbf53995 | a0f098c57271c5e0f14003d1fa2558949696daad | /Assessment_1/4) b) Sequence_numbers.R | 0f9d1297deacc3e7e057dff4797743420e4f1723 | [] | no_license | Aswinraj-023/R_programming_CA_1 | c436896b3381484a89b24d431fb6ff2764299035 | 174a7549936b82dbc7fe2585e2015ad48605ed32 | refs/heads/main | 2023-08-30T14:14:46.104576 | 2021-11-08T18:50:09 | 2021-11-08T18:50:09 | 425,949,493 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 311 | r | 4) b) Sequence_numbers.R | # 4) b) Printing a sequence
# Unique ID : E7321008
sequence_num <- seq(1 : 6) #creating a sequence of numbers from 1 to 6
num <- 0 #value to be incremented
for (i in sequence_num){ # Accessing each position in sequence_num
num = num + i # incrementing 'num'
print(num) #printing values in 'num'
}
|
8c2523edfa445eb804136aea4117a7c51f89ef0a | 4b2398a4ea26a0797bcd3d57ed37ae4ac5777933 | /report/adapt_metadata.R | bba61211a5400f3b2d5af2b59512a35679403ad5 | [] | no_license | jamesdalg/Goldfeld_EPIC_methylation_array_analysis | a4bfe50c7faff60cef71b7e3bd3d3ac027f106a2 | 8baf8f607e9ab9e8a248ad6868b87872ba40c992 | refs/heads/master | 2021-09-26T17:35:12.772605 | 2018-10-31T21:03:56 | 2018-10-31T21:03:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,327 | r | adapt_metadata.R | #metadata<- rio::import(paste0(dataDirectory,"/SampleSheet.csv"))
metadata<-metadata %>% tidyr::separate(TargetID, c("Slide","Array","AVG","Beta"),remove = FALSE) %>%
dplyr::select(-c(AVG,Beta)) %>% dplyr::rowwise() %>%
dplyr::mutate(Basename=paste0(dataDirectory,"/",Slide,"/",Slide,"_",Array))
colnames(metadata)[2] <-"Sample_Group"
rio::export(metadata,file=paste0(dataDirectory,"/metadata_final.csv"))
#/Users/victorbarrera/orch_P/PIs/anne_goldfeld/TBHIV_850K/data/201557540002/201557540002_R02C01
#####
metadata <- rio::import("~/Desktop/Projects/anne_goldfeld/TBHIV_850K/data/metadata_corrected.csv")
colnames(metadata)[2] <-"Sample_Group"
colnames(metadata) <- gsub(" ","_",tolower(colnames(metadata)))
metadata$sample_name<- gsub(" ","_",metadata$sample_name)
metadata$sentrix_id <- as.character(metadata$sentrix_id)
metadata$sentrix_position <- as.character(metadata$sentrix_position)
metadata<-metadata %>%
dplyr::mutate(Basename=paste0(dataDirectory,"/",sentrix_id,"/",sentrix_id,"_",
sentrix_position))
metadata$gender <- ifelse(metadata$gender == 0, "M", "F")
metadata$arm <- ifelse(metadata$arm == 0, "Early", "Late")
metadata$iris <- ifelse(metadata$iris == 0, "no", "yes")
rio::export(metadata,file=paste0(dataDirectory,"/metadata_final.csv"))
|
7ff307c2c4fa1cc15af63c3358cde6aa1ffeb99c | 2da2406aff1f6318cba7453db555c7ed4d2ea0d3 | /inst/snippet/ice06.R | 47c4df926e8a6e8184caadb9f8616c81f89c2211 | [] | no_license | rpruim/fastR2 | 4efe9742f56fe7fcee0ede1c1ec1203abb312f34 | d0fe0464ea6a6258b2414e4fcd59166eaf3103f8 | refs/heads/main | 2022-05-05T23:24:55.024994 | 2022-03-15T23:06:08 | 2022-03-15T23:06:08 | 3,821,177 | 11 | 8 | null | null | null | null | UTF-8 | R | false | false | 80 | r | ice06.R | ice.trt <- lm(t1930 - b1930 ~ treatment * location, data = Ice)
anova(ice.trt)
|
ec7d3585d8e508613438c5a5f12830d8c32998ff | fa0c02b6efd408d29739f413481d9ee2eb75fbe0 | /imdb.R | 23cad19407e361942f20c6c08c4f6604e146ecfb | [] | no_license | Rohika379/Text-mining | e18ab8846abd7b5324341296560e06c622680f49 | cf276454679c0f7a71657f41d6ba0eab029b6183 | refs/heads/main | 2023-05-07T17:29:03.187412 | 2021-05-31T06:27:33 | 2021-05-31T06:27:33 | 372,403,891 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,726 | r | imdb.R |
######### IMDB ###########
library(rvest)
IMDB <- "https://www.imdb.com/title/tt1477834/reviews?ref_=tt_ov_rt"
TZP <- NULL
for (i in 1){
murl <- read_html(as.character(paste(IMDB,i,sep='=')))
rev <- murl %>% html_nodes(".show-more__control") %>% html_text()
TZP <- c(TZP,rev)
}
write.table(TZP,"Aquaman.txt")
getwd()
#### Sentiment Analysis ####
txt <- TZP
str(txt)
length(txt)
View(txt)
# install.packages("tm")
library(tm)
# Convert the character data to corpus type
x <- Corpus(VectorSource(txt))
x <- tm_map(x, function(x) iconv(enc2utf8(x), sub='byte'))
# Data Cleansing
x1 <- tm_map(x, tolower)
x1 <- tm_map(x1, removePunctuation)
x1 <- tm_map(x1, removeNumbers)
x1 <- tm_map(x1, removeWords, stopwords('english'))
inspect(x1[1])
# striping white spaces
x1 <- tm_map(x1, stripWhitespace)
inspect(x1[1])
# Term document matrix
# converting unstructured data to structured format using TDM
tdm <- TermDocumentMatrix(x1)
tdm
dtm <- t(tdm) # transpose
dtm <- DocumentTermMatrix(x1)
# To remove sparse entries upon a specific value
corpus.dtm.frequent <- removeSparseTerms(tdm, 0.99)
tdm <- as.matrix(tdm)
dim(tdm)
tdm[1:20, 1:20]
inspect(x[1])
# Bar plot
w <- rowSums(tdm)
w
w_sub <- subset(w, w >= 10)
w_sub
barplot(w_sub, las=2, col = rainbow(10))
# Term laptop repeats maximum number of times
x1 <- tm_map(x1, removeWords, c('aquaman','film'))
x1 <- tm_map(x1, stripWhitespace)
tdm <- TermDocumentMatrix(x1)
tdm
tdm <- as.matrix(tdm)
tdm[100:109, 1:20]
# Bar plot after removal of the term 'phone'
w <- rowSums(tdm)
w
w_sub <- subset(w, w >=5)
w_sub
barplot(w_sub, las=2, col = rainbow(10))
|
0c3fd05195340ade0707ff50716b878baa108428 | 765d69a8c6130aa7a6a6db2e150b09c92d2b054d | /R/SummaryData.R | eec2ee8498a3dbcaf01649c8afb5aa1731c06b01 | [] | no_license | atrigila/selectionr | 53fe0e06ea2e82b93ddee3c68dc45681f6643e12 | aae96ee5b9660a84e53a748a7e0c2945f511523e | refs/heads/master | 2022-12-06T00:02:46.790558 | 2020-08-29T04:27:55 | 2020-08-29T04:27:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,076 | r | SummaryData.R | #' Summary statistics and positively selected sites
#'
#' This function allows you to summarize several mlc files from PAML into a single table.
#'
#' @param gene.list character; Gene name (file name) of the MSA
#' @param Ho.directory character; Directory where PAML null hypothesis are located
#' @param Ha.directory character; Directory where PAML alternative hypothesis are located
#' @param summary.directory Dcharacter; irectory summarized PAML files will be located
#' @keywords summary
#' @importFrom stringr str_extract
#' @importFrom stats pchisq
#' @import dplyr
#' @import stringr
#' @export summ.statistics.paml
#' @return A table with the selected sites for each gene
summ.statistics.paml <- function (gene.list, Ho.directory, Ha.directory, summary.directory) {
  # Per-gene log-likelihoods under the alternative (Ha) and null (H0) models,
  # plus the genes whose codeml output could not be parsed.
  summary.statistics.table <- data.frame(gene.name = character(), HaLnL = character(), HoLnL = character(), stringsAsFactors = FALSE)
  error.table <- data.frame(gene.name = character())
  for (gene.name in gene.list){
    print(gene.name)
    # 1. Read the H0 (null model) codeml output file for this gene.
    file.name <- paste0("mlc_H0_", gene.name)
    file.ho.location <- paste0(Ho.directory, "/", file.name)
    lines.ho <- readLines(file.ho.location)
    # 2. Read the Ha (alternative model) codeml output file.
    file.name <- paste0("mlc_Ha_", gene.name)
    file.ha.location <- paste0(Ha.directory, "/", file.name)
    lines.ha <- readLines(file.ha.location)
    # 2.1. Locate the log-likelihood lines, e.g. "lnL(ntime: 0 np: 5): ...".
    pos.start.ho <- which(startsWith(lines.ho, "lnL"))
    pos.start.ha <- which(startsWith(lines.ha, "lnL"))
    # 3. The lnL block ends on the line finishing with "+0.000000".
    pos.end.ho <- which(endsWith(lines.ho, "+0.000000"))
    pos.end.ha <- which(endsWith(lines.ha, "+0.000000"))
    # 4. If either file lacks an lnL line, record the gene as unprocessed.
    # (The original test, length(pos.start.ho & pos.start.ha) == 0, relied on
    # `&` with a zero-length operand returning logical(0); this says the same
    # thing explicitly.)
    if (length(pos.start.ho) == 0 || length(pos.start.ha) == 0) {
      print("ERROR")
      error.table.gene <- data.frame("gene.name" = as.character(gene.name))
      error.table <- rbind(error.table, error.table.gene)
    } else {
      # Keep the lines between the lnL header and the "+0.000000" terminator,
      # then extract the (negative) log-likelihood value from each line.
      lineas.utiles.ho <- lines.ho[pos.start.ho:pos.end.ho]
      clean.useful.lines.ho <- stringr::str_extract(lineas.utiles.ho, "-[:digit:]+\\.[:digit:]+")
      lineas.utiles.ha <- lines.ha[pos.start.ha:pos.end.ha]
      clean.useful.lines.ha <- stringr::str_extract(lineas.utiles.ha, "-[:digit:]+\\.[:digit:]+")
      # NOTE(review): changes the session-wide print precision as a side effect.
      options(digits = 11)
      clean.useful.lines.ha <- as.numeric(clean.useful.lines.ha)
      # 5. One row per gene: Ha lnL and H0 lnL side by side.
      summary.statistics.gene <- data.frame("gene.name" = as.character(gene.name), "HaLnL" = clean.useful.lines.ha, "HoLnL" = clean.useful.lines.ho, stringsAsFactors = FALSE)
      summary.statistics.table <- rbind(summary.statistics.table, summary.statistics.gene)
    }
  }
  summary.statistics.table[, 2] <- as.numeric(summary.statistics.table[, 2])
  summary.statistics.table[, 3] <- as.numeric(summary.statistics.table[, 3])
  # LRT = 2 * (lnL_Ha - lnL_H0); p-value from a chi-square with 1 df.
  summary.output <- summary.statistics.table %>%
    mutate("LRT" = 2*(summary.statistics.table[, 2] - summary.statistics.table[, 3]))
  summary.output$p.value <- stats::pchisq(summary.output$LRT, df = 1, lower.tail = FALSE)
  # All output files are written into summary.directory.
  setwd(summary.directory)
  write.table(summary.output, "Summary Statistics PAML.txt", quote = FALSE, row.names = FALSE, col.names = TRUE, sep = "\t")
  write.table(error.table, "Genes not processed - Error.txt", quote = FALSE, row.names = FALSE, col.names = TRUE, sep = "\t")
  ## Detect BEB (Bayes Empirical Bayes) sites for genes with a significant LRT.
  # Relative path: the working directory is summary.directory after setwd()
  # above. (The original paste0(summary.directory, file) broke whenever the
  # directory argument had no trailing slash.)
  table <- read.delim("Summary Statistics PAML.txt")
  table$BEBsites <- NA
  for (i in seq_len(nrow(table))) {
    if (table$p.value[i] < 0.05) {
      # Scan the Ha file for the BEB section and keep sites flagged with "*".
      file.name <- paste0("mlc_Ha_", table[i, 1])
      file.ha.location <- paste0(Ha.directory, "/", file.name)
      lines.ha <- readLines(file.ha.location)
      pos.start.ha <- which(startsWith(lines.ha, "Bayes Empirical Bayes"))
      pos.end.ha <- which(endsWith(lines.ha, "The grid (see ternary graph for p0-p1)"))
      lineas.utiles.ha <- lines.ha[pos.start.ha:pos.end.ha]
      clean.useful.lines.ha <- stringr::str_extract(lineas.utiles.ha, ("\\d+\\s[:upper:]\\s\\d+\\.\\d+\\*"))
      clean.useful.lines.ha <- clean.useful.lines.ha[!is.na(clean.useful.lines.ha)]
      clean.useful.lines.ha <- paste(clean.useful.lines.ha, collapse = ", ")
      if (clean.useful.lines.ha == "") {
        table$BEBsites[i] <- "No sites w>1"
      } else {
        table$BEBsites[i] <- clean.useful.lines.ha
      }
    } else {
      # Not significant: no need to look for selected sites.
      table$BEBsites[i] <- "NS"
    }
  }
  # Write the BEB table BEFORE returning; in the original these statements came
  # after return() and were unreachable, so "BEBSites.txt" was never created.
  write.table(table, file = "BEBSites.txt", quote = FALSE, row.names = FALSE, col.names = TRUE, sep = "\t")
  return(table)
}
|
dba2f26dc097cea08bbe0b8e83bba7bad27f1394 | 2085ddb7f16dfd2486df9155e2d51fffb1bf6ffc | /R/predictionet.press.statistic.R | bd3735b5c6edd13bf0d9f81790892176609326f8 | [] | no_license | OpenSourceCancer/predictionet | 3483cd6618b1b2efd89db3b3a6cf1f62f125627c | be4c6c95f418aae172b6eceddddbc3c71061ff15 | refs/heads/master | 2021-01-18T15:01:42.308682 | 2012-02-01T17:08:41 | 2012-02-01T17:08:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,094 | r | predictionet.press.statistic.R | ### Function computing the press statistic for all target variables in topology
## topo: inferred topology
## data: matrix of continuous or categorical values (gene expressions for example); observations in rows, features in columns.
## perturbations: matrix of {0, 1} specifying whether a gene has been pertubed in some experiments. Dimensions should be the same than data
## returns press statistic for all target variables
`predictionet.press.statistic` <- function(topo,data,ensemble=FALSE,perturbations=NULL) {
	# Default: no perturbations anywhere; otherwise coerce the 0/1 matrix to logical.
	if(missing(perturbations) || is.null(perturbations)) {
		perturbations <- matrix(FALSE, nrow=nrow(data), ncol=ncol(data), dimnames=dimnames(data))
	} else {
		if(nrow(perturbations) == 1) {
			perturbations[1, ] <- as.logical(perturbations[1, ])
		} else { perturbations <- apply(perturbations, 2, as.logical) }
		dimnames(perturbations) <- dimnames(data)
	}
	if(ensemble){
		# Ensemble topologies may repeat genes across columns: replicate the
		# perturbation columns so they line up with the columns of topo.
		mypert.ens <- NULL
		for(i in seq_len(ncol(topo))){
			mypert.ens <- cbind(mypert.ens, perturbations[, colnames(topo)[i]])
		}
		colnames(mypert.ens) <- colnames(topo)
		perturbations <- mypert.ens
	}
	# res[s, t]: regression residual of sample s for target t.
	# Fixed partial argument matching: matrix(nc=, nr=) -> ncol=, nrow=.
	res <- matrix(0, ncol=ncol(topo), nrow=nrow(data), dimnames=list(rownames(data), colnames(topo)))
	vec.nsamples <- rep(0, ncol(topo))
	for(i in seq_len(ncol(topo))){
		target <- colnames(topo)[i]
		ind <- which(topo[, i] != 0)                # regulators (parents) of the target
		ind.pert <- which(perturbations[, i] == 1)  # samples where the target was perturbed
		vec.nsamples[i] <- nrow(data) - length(ind.pert)
		# Perturbed samples are excluded when regressing the target on its parents.
		if(length(ind.pert) > 0){
			mydata <- data[-ind.pert, ]
		}else{
			mydata <- data
		}
		if(length(ind) > 0){
			if(length(ind) == 1){
				if(length(ind.pert) > 0){
					res[-ind.pert, i] <- .regrlin(as.numeric(mydata[, ind]), mydata[, target])
				}else{
					res[, i] <- .regrlin(as.numeric(mydata[, ind]), mydata[, target])
				}
			}else{
				if(length(ind.pert) > 0){
					res[-ind.pert, i] <- .regrlin(apply(mydata[, ind], 2, as.numeric), mydata[, target])
				}else{
					res[, i] <- .regrlin(apply(mydata[, ind], 2, as.numeric), mydata[, target])
				}
			}
		}else{
			# No parents: regress on an intercept only.
			res[, i] <- .regrlin(as.numeric(rep(1, nrow(mydata))), mydata[, target])
		}
	}
	# PRESS = mean of squared residuals per target (over unperturbed samples).
	res <- res^2
	res <- apply(res, 2, sum)
	res <- res / vec.nsamples
	return(res)
}
|
17f34ea20a58b76985d6d0779cb78b973d955154 | 1ac2b643c00480a47af52b95f6fb1dc06cc38c3a | /run_analysis.R | 3e080b92709ac2232a5267dfcdc400f44067b3b8 | [] | no_license | matiroqueta/GettingAndCleaningData | 789b87326f1e814247c5362c6c5aa5267c198e26 | 0bbc3aa10fd5ebfa903806280cb4cd889a4f8f9e | refs/heads/master | 2022-04-29T22:58:08.132246 | 2020-04-26T21:07:24 | 2020-04-26T21:07:24 | 111,299,351 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,826 | r | run_analysis.R | # You should create one R script called run_analysis.R that does the following.
#
# Merges the training and the test sets to create one data set.
# Extracts only the measurements on the mean and standard deviation for each measurement.
# Uses descriptive activity names to name the activities in the data set
# Appropriately labels the data set with descriptive variable names.
# From the data set in step 4, creates a second,
#independent tidy data set with the average of each variable for each activity and each subject.
# Load reshape2 (melt/dcast are used at the end to build the tidy summary).
library(reshape2)
# Check whether the UCI HAR dataset is already downloaded and unzipped:
# every expected file must be present, otherwise everything is re-fetched.
files.check = c(
  "UCI HAR Dataset/test/Inertial Signals/body_acc_x_test.txt",
  "UCI HAR Dataset/test/Inertial Signals/body_acc_y_test.txt",
  "UCI HAR Dataset/test/Inertial Signals/body_acc_z_test.txt",
  "UCI HAR Dataset/test/Inertial Signals/body_gyro_x_test.txt",
  "UCI HAR Dataset/test/Inertial Signals/body_gyro_y_test.txt",
  "UCI HAR Dataset/test/Inertial Signals/body_gyro_z_test.txt",
  "UCI HAR Dataset/test/Inertial Signals/total_acc_x_test.txt",
  "UCI HAR Dataset/test/Inertial Signals/total_acc_y_test.txt",
  "UCI HAR Dataset/test/Inertial Signals/total_acc_z_test.txt",
  "UCI HAR Dataset/test/X_test.txt",
  "UCI HAR Dataset/test/subject_test.txt",
  "UCI HAR Dataset/test/y_test.txt",
  "UCI HAR Dataset/train/Inertial Signals/body_acc_x_train.txt",
  "UCI HAR Dataset/train/Inertial Signals/body_acc_y_train.txt",
  "UCI HAR Dataset/train/Inertial Signals/body_acc_z_train.txt",
  "UCI HAR Dataset/train/Inertial Signals/body_gyro_x_train.txt",
  "UCI HAR Dataset/train/Inertial Signals/body_gyro_y_train.txt",
  "UCI HAR Dataset/train/Inertial Signals/body_gyro_z_train.txt",
  "UCI HAR Dataset/train/Inertial Signals/total_acc_x_train.txt",
  "UCI HAR Dataset/train/Inertial Signals/total_acc_y_train.txt",
  "UCI HAR Dataset/train/Inertial Signals/total_acc_z_train.txt",
  "UCI HAR Dataset/train/X_train.txt",
  "UCI HAR Dataset/train/subject_train.txt",
  "UCI HAR Dataset/train/y_train.txt",
  "UCI HAR Dataset/activity_labels.txt",
  "UCI HAR Dataset/features.txt"
)
files = list.files(pattern = ".txt",recursive = T)
# If any expected file is missing, download and unzip the whole archive.
if (!all(files.check %in% files)){
  download.file(url = "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", destfile = "dataset.zip")
  unzip(zipfile = "dataset.zip")
  files = list.files(pattern = ".txt",recursive = T)
  file.remove("dataset.zip")
}
# Load the activity labels and the feature (column) names.
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt",stringsAsFactors = F)
colnames(activity_labels) <- c("activity","activity_label")
features <- read.table("./UCI HAR Dataset/features.txt",stringsAsFactors = F)
colnames(features) <- c("index","names")
# Keep only the mean and standard-deviation features, then clean the names
# into valid, readable R column names.
features <- subset(features,grepl(pattern = "-*mean*.|.*std*.", x = names, ignore.case = T))
features$names <- gsub(pattern = "-mean", replacement = "Mean",x = features$names)
features$names <- gsub(pattern = "-std", replacement = "Std",x = features$names)
features$names <- gsub(pattern = "[-()]|\\,", replacement = "",x = features$names)
features$names <- make.names(features$names)
# Load the train and test datasets, keeping only the selected feature columns.
train_x <- read.table(file = "./UCI HAR Dataset/train/X_train.txt", dec = ".")[,features$index]
colnames(train_x) <- features$names
train_y <- read.table(file = "./UCI HAR Dataset/train/y_train.txt", dec = ".")
colnames(train_y) <- "activity"
train_subject <- read.table(file = "./UCI HAR Dataset/train/subject_train.txt", dec = ".")
colnames(train_subject) <- "subject"
test_x <- read.table(file = "./UCI HAR Dataset/test/X_test.txt", dec = ".")[,features$index]
colnames(test_x) <- features$names
test_y <- read.table(file = "./UCI HAR Dataset/test/y_test.txt", dec = ".")
colnames(test_y) <- "activity"
test_subject <- read.table(file = "./UCI HAR Dataset/test/subject_test.txt", dec = ".")
colnames(test_subject) <- "subject"
# Combine subject, measurements (x) and activity (y) column-wise per set.
train <- cbind(train_subject, train_x, train_y)
test <- cbind(test_subject, test_x, test_y)
# Merge the train and test sets into one dataset.
df <- rbind(train,test)
# Add descriptive activity labels and drop the numeric activity index.
df <- merge(x = df, y = activity_labels, by = "activity")
df$activity <- NULL
df$activity_label <- as.factor(df$activity_label)
df$subject <- as.factor(df$subject)
# Free the intermediate objects.
rm(list = c("train_x","train_y","train_subject","train",
            "test_x","test_y","test_subject","test"))
gc()
# Tidy summary: the mean of each variable per subject and activity,
# written to tidy.txt.
df_melt <- melt(df, id = c("subject", "activity_label"))
df_mean <- dcast(df_melt, subject + activity_label ~ variable, mean)
write.table(x = df_mean, file = "tidy.txt", row.names =F, quote = F)
|
29e19f4c8622f748174c44d07bc025fe2943a84c | 08da078d80d848ba659a9b6b5a9de1131ac60aed | /Data/uge8tirsdag.R | 4330a7568bf8e48b9532ac89a23e3c7e054c3ff9 | [] | no_license | Path-to-Exile/SS-Answers-18-19 | 169346cefb2ca7dbe5cf74394c698b55dcab4ef2 | 2dc33c4574ae1a5757a1a2375ebce3d55ebfb937 | refs/heads/master | 2020-05-09T10:16:59.451786 | 2019-04-12T15:35:12 | 2019-04-12T15:35:12 | 181,035,728 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,432 | r | uge8tirsdag.R | #August 2015, opgave 4
#1
setwd("/Users/Svesketerning/Google Drev/SS, 18-19/ss-data")
norrebro = read.table("norrebro.txt", header = TRUE)
mean(norrebro$areal)
#Vi udregner gennemsnittet af areal, vi f??r 69,55. Svaret er C.
#2
model1 = lm(pris ~ areal, data = norrebro)
summary(model1)
#Vi laver modellen og f??r svaret til D
#Responsvariablen er p?? vensreside af ~ mens den forklarende er p?? h??jre side. Se side. 144
#3
confint(model1, level = 0.95)
#man bruger confint n??r man har lin??ere modeller,
#n??r konfidensintervaller skal bruges. deafult er 0.95, men det kan ??ndres vha. level
#Svaret er da C
#4
#I f??lge vores model er er \beta lidt under 40k, s?? vi forventer en forskel p?? lidt under 400k, dvs. D
#5
#Da vi lavede et summary tidligere, f??r vi en en del af outputtet der hedder
#Residual standard error: 555900 on 160 degrees of freedom.
#Spredningen er resdiual standard error, s??ledes f??r vi at A er svaret
#6
coef(model1)
-281871.88+39761.72*55
#ca. 1,9 mil. C
#7
#Vi laver residualer, se evt. side 147
plot(fitted(model1),rstandard(model1))
#8
#Middelv??rdi antagelsen ser god ud.
#Det er ikke nogen systematik i om residualerne ligger over eller under x=0
#Det ligner dog lidt vi har en trumpet form. Antagelsen om varians er derfor ikke s?? sikker.
#Variationen i pris er lille for sm?? lejligheder, men stor for store lejligheder.
#SS.33
#1-2
#lige lavet
#3
newdata = data.frame(areal=c(55))
predict(model1, newdata, interval = "confidence")
predict(model1, newdata, interval = "predict")
#Vi ser at pr??diktionsintervallet er bredere end konfindesintervallet, hvilket er forventeligt
#Fortolkning? Pr??ciktions: 95% interval for hvad fremtidige observationer vil ligge inde i
#Konfidens? Samme som i nadre modeller. Giver vi gentager fors??get en del gange,
#s?? vil omtrent 95% af intervallerne indeholde det sande gennemsnit
#Hovedpointe: Pr??diktionsintervaller fort??ller noget om fordelingen af variable,
#ikke usikkerheden om dataets gennemsnit.
#Pr??diktionsintervaller skal b??de tage h??jde for usikkerheden omkring det sande gennemsnit
#og naturlig variation, hvilket er den intuitive grund til pr??diktionsintervaller er st??rre end konfidens.
#4
#Vi konkluderede lige modellen var lidt usikker ift. til varians. men.
#2,7 er ikke i KI, men er i vores PI. S?? helt dumt er det nok ikke.
#April 2016, opgave 4
#1
gasdata = read.table("gasData.txt", header = TRUE)
mean(gasdata$temp)
#Svar: C
#2 s.144
model2 = lm(gas ~ temp, data = gasdata)
plot(fitted(model2),rstandard(model2))
abline(0,0)
#3
#Det er ingen trumpet form som i opgaven f??r eller nogen i den stil, variansantagelsen ser derfor OK ud.
#Der er heller ikke tendens ift. om residualerne ligger over eller under x=0, middelv??rdiantagelsen er derfor ogs?? OK
#4
summary(model2)
#Vi afl??ser: D er svaret.
#5
confint(model2, level = 0.95)
#C
#6
confint(model2, level = 0.9)
#D
#7
#Vi kvadrerer Residual stan error
8.375^2
#A
#8
newdata2 = data.frame(temp = c(5))
predict(model2, newdata2, interval = "confidence")
#D
#9
#Vi laver en pr??diktionsinterval
newdata3 = data.frame(temp = c(2))
predict(model2, newdata3, interval = "predict")
#Vi ser at 180 er i god overenestemmelse med observeret data. A.
#10
#S??tning 6.4, vi isolerer SPD. Se PDF
ssd = sum((gasdata$temp-mean(gasdata$temp))^2)
ssd
ssd*-11.0142
#evt disse:
var(gasdata$temp)*25
var(gasdata$temp)*(26-1)*(-11.0142)
|
a04f0974f1a05b980b5ab2087390318050230543 | 29585dff702209dd446c0ab52ceea046c58e384e | /mlt/tests/bugfixes.R | 695905b19b621b2614b197b440aa9f60684e5b2a | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,045 | r | bugfixes.R |
# Regression tests for previously reported bugs in the mlt package; each
# section is labelled with the name of the person who reported the issue.
library("mlt")
library("sandwich")
set.seed(29)
### Nadja Klein
# A ctm() with two interacting Bernstein bases must fit, predict and plot.
dat <- data.frame(matrix(rnorm(300),ncol=3))
names(dat) <- c("y","x1","x2")
### set-up conditional transformation model for conditional
y <- numeric_var("y", support = c(min(dat$y), max(dat$y)), bounds = c(-Inf, Inf))
x1 <- numeric_var("x1", support = c(min(dat$x1), max(dat$x1)), bounds = c(-Inf, Inf))
x2 <- numeric_var("x2", support = c(min(dat$x2), max(dat$x2)), bounds = c(-Inf, Inf))
ctmm2 <- ctm(response = Bernstein_basis(y, order = 4, ui = "increasing"),
             interacting = c(x1=Bernstein_basis(x1, order = 3),
                             x2=Bernstein_basis(x2, order= 3)))
### fit model
mltm2 <- mlt(ctmm2, data = dat)
(p <- predict(mltm2, newdata = data.frame(x1=0, x2 = 0), q = mkgrid(mltm2, n = 10)[["y"]]))
### plot data
plot(mltm2,newdata=expand.grid(x1=0:1, x2 = 0:1))
### check update
# Refitting with case weights must agree with a fit on the weighted subset:
# estimating functions, log-likelihoods and cross-evaluated logLik all match.
dist <- numeric_var("dist", support = c(2.0, 100), bounds = c(0, Inf))
speed <- numeric_var("speed", support = c(5.0, 23), bounds = c(0, Inf))
ctmm <- ctm(response = Bernstein_basis(dist, order = 4, ui = "increasing"),
            interacting = Bernstein_basis(speed, order = 3))
m <- mlt(ctmm, data = cars)
e <- estfun(m)
w <- runif(nrow(cars)) < .8
m1 <- update(m, weights = w, theta = coef(m))
e1 <- estfun(m1, parm = coef(m))
stopifnot(max(abs(e * w - e1)) < .Machine$double.eps)
e1 <- estfun(m1)
m2 <- mlt(ctmm, data = cars[w > 0,], theta = coef(m))
stopifnot(isTRUE(all.equal(logLik(m1), logLik(m2))))
stopifnot(isTRUE(all.equal(logLik(m1, coef(m2)), logLik(m2, coef(m1)))))
e2 <- estfun(m2, parm = coef(m1))
stopifnot(max(abs(e1[w > 0,] - e2)) < .Machine$double.eps)
### Muriel Buri
# A scaled model with many shift terms must converge and summarise cleanly.
data("bodyfat", package = "TH.data")
set.seed(29)
y <- numeric_var("DEXfat", support = c(15, 45), bounds = c(10, 64))
basis_y <- Bernstein_basis(y, order = 2, ui = "incre")
x <- names(bodyfat)[-2]
xfm <- as.formula(paste("~", x, collapse = "+"))
m <- ctm(basis_y, shift = xfm, data = bodyfat)
mod <- mlt(m, data = bodyfat, scale = TRUE, checkGrad = FALSE)
summary(mod)
|
326e99a5f7e95b15ef89a8acc3e99294fc319955 | 723e2ac2013f5c48beb49f9a9cbb3b86c7692f9c | /plot2.R | c8df04a75a3ee50896607f8fa6147e65b2e7542a | [] | no_license | ghollmann/ExData_Plotting1 | fd9952bef1164126349c7a32cb60b5e14ff5e8e4 | 695a6ad5bea1c6f044f97154cea20ee7287bdbf1 | refs/heads/master | 2020-03-12T03:11:48.620985 | 2018-05-14T14:50:48 | 2018-05-14T14:50:48 | 130,419,595 | 0 | 0 | null | 2018-04-20T22:33:27 | 2018-04-20T22:33:26 | null | UTF-8 | R | false | false | 1,936 | r | plot2.R | #---------------------------------------------------------------------------
# This script will read in a Household Power Consumption data file from the
# UCI Machine Learning repository that contains:
# Measurements of electric power consumption in one household with a one-minute
# sampling rate over a period of almost 4 years. Different electrical quantities
# and some sub-metering values are available."
# The results of this script is to produce a plot of household global minute-averaged
# active power (in kilowatts) for the time period of February 1, 2007 and February 2, 2007.
# This plot will be save to a file named plot2.png. In order to read in the entire file
# a minimum of 220 MB or memory will be needed.
#
# Load Packages and get the Data
path<-getwd()
#install.packages("lubridate")
library(lubridate)
url <- "http://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip"
download.file(url, file.path(path, "household_power_comsumption.zip"))
unzip(zipfile = "household_power_comsumption.zip")
HPConsumption<-read.table(file="household_power_consumption.txt", sep=";", header=TRUE,
colClasses = c("factor","factor","numeric","numeric","numeric",
"numeric","numeric","numeric","numeric"), na.strings="?")
# Get only the observations from dates February 1st and 2nd of 2007. The date is in the format of d-m-y
HPConsumption$Date <- dmy(HPConsumption$Date)
HPConsumptionSub <- subset(HPConsumption, Date=="2007-2-1" | Date=="2007-2-2")
HPConsumptionSub$DateTime<-ymd_hms(paste(HPConsumptionSub$Date, HPConsumptionSub$Time, " "),
tz=Sys.timezone())
# Produce the plot and save as plot2.png
plot(HPConsumptionSub$Global_active_power~HPConsumptionSub$DateTime, type="l" ,
ylab="Global Active Power (kilowatts)", xlab="")
dev.copy(png, file = "plot2.png")
dev.off()
|
c0ce32467c5a3eb3c150de87acae917738870906 | df9c306238e105d36561c94aaed36dc6073492b7 | /scripts/Table_01.R | ad46362ff14e29d953437cd5218b2ceb5bf4862b | [] | no_license | cmcninch/bulliform_cell_gwas | 743b630556ef71f6305037f5963f243594a193d3 | 59c55411ded101dc4ea849bf965202dd43606736 | refs/heads/master | 2020-08-17T10:02:19.067425 | 2019-10-16T22:01:12 | 2019-10-16T22:01:12 | 215,649,656 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,769 | r | Table_01.R | ### Load libraries ###
##################################################
library(tidyverse)
library(data.table)
library(lubridate)
library(lme4)
library(lmerTest)
library(broom.mixed)
### Create functions ###
##################################################
heritability_func <- function(df, mean = NULL, trait){
  # Broad-sense heritability on an entry-mean basis across locations.
  #   df:    data frame with Genotype, Location, Rep and the trait column.
  #   mean:  unused; retained (now with a default) for backward compatibility
  #          with existing callers. It also shadows base::mean inside this
  #          function, which is why it must never be used here.
  #   trait: name of the trait column, given as a string.
  # The rows of the tidied lmer fit that carry no std.error are the
  # random-effect variance estimates.
  model <- lmer(get(trait) ~ (1 | Genotype) + (1 | Location) + (1 | Genotype:Location) + (1 | Rep:Location), data = df) %>%
    tidy() %>%
    filter(is.na(std.error))
  var_g <- filter(model, group == "Genotype")$estimate[1]
  var_error <- filter(model, group == "Residual")$estimate[1]
  var_ge <- filter(model, group == "Genotype:Location")$estimate[1]
  # NOTE(review): var_error/3*2 evaluates to var_error * (2/3); if the intended
  # divisor was 3*2 (e.g. locations x reps), parentheses are missing — confirm
  # against the analysis plan before changing (kept as-is here).
  return(var_g/(var_g + var_ge/2 + var_error/3*2))
}
repeatability_func <- function(df, trait) {
  # Repeatability within a single location: Vg / (Vg + Ve / 2), two reps.
  #   df:    data frame with Genotype, Rep and the trait column.
  #   trait: name of the trait column, given as a string.
  # Variance components are the tidied lmer rows that carry no standard error.
  fitted_terms <- tidy(lmer(get(trait) ~ (1 | Genotype) + (1 | Rep) + (1 | Genotype:Rep), data = df))
  variance_rows <- filter(fitted_terms, is.na(std.error))
  genetic_var <- filter(variance_rows, group == "Genotype")$estimate[1]
  residual_var <- filter(variance_rows, group == "Residual")$estimate[1]
  genetic_var / (genetic_var + residual_var / 2)
}
### Load data and compute entry means ###
##################################################
# Bulliform cell field width (FW): average per image, then per plot
# (Genotype x Location x Rep x Row); pixel measurements are converted to mm.
bulliform_fw_entry_means <- fread("~/Box Sync/McNinch Projects/Bulliform_Cell_GWAS_Manuscript/data/trait_data/Bulliform_Cell_Field_Areas.csv") %>%
  group_by(Image, Genotype, SNP_Genotype_Name, Location, Row, Rep, Block) %>%
  summarise(Bulliform_FW = mean(Bulliform_Field_Area, na.rm = TRUE)/1388/950*1000) %>% # width of images is 1388 pixels; 950 pixels = 1mm
  group_by(Genotype, Location, Rep, Row) %>%
  summarise(Bulliform_FW = mean(Bulliform_FW, na.rm = TRUE)) %>%
  drop_na()
# Bulliform cell field frequency (FF): counts per image converted to counts/mm.
bulliform_ff_entry_means <- fread("~/Box Sync/McNinch Projects/Bulliform_Cell_GWAS_Manuscript/data/trait_data/Bulliform_Cell_Field_Counts.csv") %>%
  group_by(Genotype, Location, Rep, Row) %>%
  summarise(Bulliform_FF = mean(Bulliform_Cell_Field_Count, na.rm = TRUE)/1040 * 245) %>% # height of images is 1040 pixels; 245 pixels = 1mm
  drop_na()
# Flowering traits: days to anthesis/silking relative to planting (5/8/17).
flowering_times <- fread("~/Box Sync/McNinch Projects/Bulliform_Cell_GWAS_Manuscript/data/trait_data/Flowering_Traits.csv") %>%
  mutate(Anthesis_Date = as.numeric(mdy(Anthesis_Date) - mdy("5/8/17")),
         Silking_Date = as.numeric(mdy(Silking_Date) - mdy("5/8/17"))) %>%
  gather("Trait", "Score", 6:7) %>%
  group_by(Genotype, Rep, Row, Trait) %>%
  summarise(Score = mean(Score, na.rm = TRUE)) %>%
  spread(Trait, Score)
### Means and standard deviations ###
##################################################
bulliform_fw_stats <- tibble(trait = "bulliform_fw",
                             mean = mean(bulliform_fw_entry_means$Bulliform_FW, na.rm = TRUE),
                             sd = sd(bulliform_fw_entry_means$Bulliform_FW, na.rm = TRUE))
bulliform_ff_stats <- tibble(trait = "bulliform_ff",
                             mean = mean(bulliform_ff_entry_means$Bulliform_FF, na.rm = TRUE),
                             sd = sd(bulliform_ff_entry_means$Bulliform_FF, na.rm = TRUE))
dta_stats <- tibble(trait = "dta",
                    mean = mean(flowering_times$Anthesis_Date, na.rm = TRUE),
                    sd = sd(flowering_times$Anthesis_Date, na.rm = TRUE))
dts_stats <- tibble(trait = "dts",
                    mean = mean(flowering_times$Silking_Date, na.rm = TRUE),
                    sd = sd(flowering_times$Silking_Date, na.rm = TRUE))
combined_stats <- bind_rows(bulliform_fw_stats, bulliform_ff_stats, dta_stats, dts_stats)
### ANOVAs ###
##################################################
# Multi-location models (both bulliform traits), then Ames-only models.
bff_multi_model <- aov(formula = Bulliform_FF ~ Genotype + Location + Genotype:Location + Location:Rep, data = bulliform_ff_entry_means)
summary(bff_multi_model)
bfw_multi_model <- aov(formula = Bulliform_FW ~ Genotype + Location + Genotype:Location + Location:Rep, data = bulliform_fw_entry_means)
summary(bfw_multi_model)
bff_single_model <- aov(Bulliform_FF ~ Genotype + Rep + Genotype:Rep, data = filter(bulliform_ff_entry_means, Location == "Ames"))
summary(bff_single_model)
bfw_single_model <- aov(Bulliform_FW ~ Genotype + Rep + Genotype:Rep, data = filter(bulliform_fw_entry_means, Location == "Ames"))
summary(bfw_single_model)
### Heritability and repeatability estimates ###
##################################################
# Heritability across locations; repeatability within the Ames location.
heritability_func(df = bulliform_fw_entry_means, trait = "Bulliform_FW")
repeatability_func(df = filter(bulliform_fw_entry_means, Location == "Ames"), trait = "Bulliform_FW")
heritability_func(df = bulliform_ff_entry_means, trait = "Bulliform_FF")
repeatability_func(df = filter(bulliform_ff_entry_means, Location == "Ames"), trait = "Bulliform_FF")
repeatability_func(df = flowering_times, trait = "Anthesis_Date")
repeatability_func(df = flowering_times, trait = "Silking_Date")
|
76bf72a795821cce04cc25e3976161d34f382253 | 58ee698c784bbcc80316bf9a3fa3a220e5d76d59 | /man/ReportMatching.Rd | 286c0c8042d5d6f0d25519ce941be967639b7231 | [] | no_license | ms609/TreeDist | cd7755fece35e9996ca65733b90338aca4669da2 | c72382062be06babb795c3961a0b3e5c0be7bb20 | refs/heads/master | 2023-07-20T11:09:26.044909 | 2023-07-14T13:06:31 | 2023-07-14T13:06:31 | 196,188,301 | 23 | 4 | null | 2023-09-07T07:14:04 | 2019-07-10T10:55:31 | R | UTF-8 | R | false | true | 713 | rd | ReportMatching.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tree_distance_utilities.R
\name{ReportMatching}
\alias{ReportMatching}
\title{List clades as text}
\usage{
ReportMatching(splits1, splits2, realMatch = TRUE)
}
\arguments{
\item{splits, splits1, splits2}{Logical matrices with columns specifying membership
of each corresponding matched clade.}
}
\value{
\code{ReportMatching} returns a character vector describing each pairing
in a matching.
}
\description{
List clades as text
}
\seealso{
\code{\link{VisualizeMatching}}
}
\author{
\href{https://orcid.org/0000-0001-5660-1727}{Martin R. Smith}
(\href{mailto:martin.smith@durham.ac.uk}{martin.smith@durham.ac.uk})
}
\keyword{internal}
|
15adfa0c0bd9fe287899fd6f34a45350f7709cff | 6570d3c4dd4ddab6b5d17839a270a645176479a4 | /man/kern.Rd | 8e0ff6048e96dce9dcb289a1cfe100f12e8250ee | [] | no_license | Srisai85/generalCorr | b1eb49ceed363ca7ea02a2553dcaba1681261058 | 1b453e5ec8322830f390804bc77a65c31fa88bf2 | refs/heads/master | 2021-01-27T08:13:52.513101 | 2019-10-30T19:50:02 | 2019-10-30T19:50:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,932 | rd | kern.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kern.R
\name{kern}
\alias{kern}
\title{Kernel regression with options for residuals and gradients.}
\usage{
kern(dep.y, reg.x, tol = 0.1, ftol = 0.1, gradients = FALSE,
residuals = FALSE)
}
\arguments{
\item{dep.y}{{Data on the dependent (response) variable}}
\item{reg.x}{{Data on the regressor (stimulus) variables}}
\item{tol}{{Tolerance on the position of located minima of the cross-validation
function (default =0.1)}}
\item{ftol}{{Fractional tolerance on the value of cross validation function
evaluated at local minima (default =0.1)}}
\item{gradients}{{Make this TRUE if gradients computations are desired}}
\item{residuals}{{Make this TRUE if residuals are desired}}
}
\value{
Creates a model object `mod' containing the entire kernel regression output.
Type \code{names(mod)} to reveal the variety of outputs produced by `npreg' of the `np' package.
The user can access all of them at will by using the dollar notation of R.
}
\description{
Function to run kernel regression with options for residuals and gradients
assuming no missing data.
}
\note{
This is a work horse for causal identification.
}
\examples{
\dontrun{
set.seed(34);x=matrix(sample(1:600)[1:50],ncol=2)
require(np); options(np.messages=FALSE)
k1=kern(x[,1],x[,2])
print(k1$R2) #prints the R square of the kernel regression
}
}
\references{
Vinod, H. D. 'Generalized Correlation and Kernel Causality with
Applications in Development Economics' in Communications in
Statistics -Simulation and Computation, 2015,
\url{http://dx.doi.org/10.1080/03610918.2015.1122048}
}
\seealso{
See \code{\link{kern_ctrl}}.
}
\author{
Prof. H. D. Vinod, Economics Dept., Fordham University, NY
}
\concept{apd amorphous partial derivative}
\concept{kernel regression gradients}
\concept{kernel regression residuals}
|
d2fdf9ed11f758fb00a2107c8756bfc414952acb | e535d498001519774956adcc5b0106a5f4e555ac | /Machado_Dpse/R/sex_biased_genes.R | c9f6aa76d72c3852492d76d7d45e90af05c17226 | [] | no_license | kraigrs/thesis_work | f73c6f130a0cf33ed079acb35208bff9cb85d4d1 | bcc8e46b5c65f08c61d5beb8e29ac7e4df101cff | refs/heads/master | 2021-01-22T16:18:29.372793 | 2015-09-10T18:48:11 | 2015-09-10T18:48:11 | 34,088,947 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,944 | r | sex_biased_genes.R | ############################################################################################
# script to find male-biased genes
############################################################################################
FET_misexpression <- function(locus)
{
  # Two-sided Fisher's exact test for misexpression of one gene.
  #   locus: numeric vector of length 4:
  #     [1] gene count in sample 1, [2] gene count in sample 2,
  #     [3] total count in sample 1, [4] total count in sample 2.
  # Returns c(odds-ratio estimate, CI lower bound, CI upper bound, p-value).
  # The local 2x2 table is named 'contingency' so it no longer shadows
  # base::table().
  contingency <- matrix(c(locus[1], locus[3] - locus[1], locus[2], locus[4] - locus[2]),
                        nrow = 2,
                        dimnames = list(c("gene", "not gene"), c("sample1", "sample2")))
  test <- fisher.test(contingency, or = 1, alternative = "two.sided", conf.level = 0.95)
  result <- c(test$estimate, test$conf.int[1], test$conf.int[2], test$p.value)
  return(result)
}
create_table <- function(mat)
{
  # Count genes per chromosome arm, in the fixed order XL, 4, 3, XR, 2.
  #   mat: data frame with a 'chromosome' column whose values begin with the
  #        arm name (e.g. "XL_group1a").
  # Returns an unnamed integer vector of length 5.
  # The five copy-pasted per-arm lines were folded into a single helper.
  chrom.counts <- table(mat$chromosome)
  count.arm <- function(prefix) {
    sum(chrom.counts[grep(paste0("^", prefix), names(chrom.counts), perl = TRUE)])
  }
  vapply(c("XL", "4", "3", "XR", "2"), count.arm, integer(1), USE.NAMES = FALSE)
}
############################################################################################
# Alternative input (regression-divergence files), kept for reference:
#F_carcass <- read.table("/Users/kraigrs/Wittkopp/Machado_Dpse/data/TL_Toro1_H6_F_carcass_reg_div.txt",header=TRUE,sep="\t");
#M_carcass <- read.table("/Users/kraigrs/Wittkopp/Machado_Dpse/data/TL_Toro1_H6_M_carcass_reg_div.txt",header=TRUE,sep="\t");
#ovaries <- read.table("/Users/kraigrs/Wittkopp/Machado_Dpse/data/TL_Toro1_H6_ovaries_reg_div.txt",header=TRUE,sep="\t");
#testes <- read.table("/Users/kraigrs/Wittkopp/Machado_Dpse/data/TL_Toro1_H6_testes_reg_div.txt",header=TRUE,sep="\t");
# Load the MOI tables for each tissue/sex combination.
F_carcass <- read.table("/Users/kraigrs/Wittkopp/Machado_Dpse/data/TL_Toro1_H6_F_carcass_MOI.txt",header=TRUE,sep="\t");
M_carcass <- read.table("/Users/kraigrs/Wittkopp/Machado_Dpse/data/TL_Toro1_H6_M_carcass_MOI.txt",header=TRUE,sep="\t");
ovaries <- read.table("/Users/kraigrs/Wittkopp/Machado_Dpse/data/TL_Toro1_H6_ovaries_MOI.txt",header=TRUE,sep="\t");
testes <- read.table("/Users/kraigrs/Wittkopp/Machado_Dpse/data/TL_Toro1_H6_testes_MOI.txt",header=TRUE,sep="\t");
# Pair the male and female samples per gene (".M"/".F" suffixes mark the sex).
carcass <- merge(M_carcass,F_carcass,by.x="gene",by.y="gene",suffixes=c(".M",".F"));
gonads <- merge(testes,ovaries,by.x="gene",by.y="gene",suffixes=c(".M",".F"));
# Gene coordinates (BED) joined with the FlyBase-ID -> GLEANR-ID mapping.
list <- read.table("/Users/kraigrs/Wittkopp/Machado_Dpse/genomes/dpse_r3.1.genes.bed",header=FALSE,sep="\t");
colnames(list) <- c("chromosome","start","stop","gene","empty","strand");
FBid2GLEANR <- read.table("/Users/kraigrs/Wittkopp/Machado_Dpse/genomes/FBid2GLEANR.txt",header=TRUE,sep="\t");
annotation <- merge(FBid2GLEANR,list,by.x="gene",by.y="gene");
# Sex-biased genes according to Assis et al.; column 1 holds the gene IDs.
Assis_MBG <- read.table("/Users/kraigrs/Wittkopp/Machado_Dpse/Assis_Dpse_MBG.txt",header=TRUE,sep="\t");
Assis_FBG <- read.table("/Users/kraigrs/Wittkopp/Machado_Dpse/Assis_Dpse_FBG.txt",header=TRUE,sep="\t");
sex_tissue_MBG <- Assis_MBG[which(Assis_MBG$Sex.Tissue.specific == 1),1];
sex_tissue_FBG <- Assis_FBG[which(Assis_FBG$Sex.Tissue.specific == 1),1];
# Sex-biased genes according to Zhang et al., joined to annotation and to the
# tissue expression tables.
Zhang_MBG <- read.table("/Users/kraigrs/Wittkopp/Machado_Dpse/Zhang_Dpse_MBG.txt",header=TRUE,sep="\t");
Zhang_FBG <- read.table("/Users/kraigrs/Wittkopp/Machado_Dpse/Zhang_Dpse_FBG.txt",header=TRUE,sep="\t");
temp <- merge(annotation,Zhang_MBG,by.x="GLEANR",by.y="GleanR.ID");
sex_biased_testes <- merge(testes,temp,by.x="gene",by.y="gene");
temp <- merge(annotation,Zhang_FBG,by.x="GLEANR",by.y="GleanR.ID");
sex_biased_ovaries <- merge(ovaries,temp,by.x="gene",by.y="gene");
###########################
# sex-biased genes by FET #
###########################
sex_bias_carcass_results <- t(apply(as.matrix(cbind(
carcass[,2]+carcass[,3]+carcass[,4],
carcass[,6]+carcass[,7]+carcass[,8],
rep(sum(carcass[,2:4],nrow(carcass))),
rep(sum(carcass[,6:8],nrow(carcass))))),
1,FET_misexpression));
qval <- p.adjust(sex_bias_carcass_results[,4],method="fdr");
OR <- sex_bias_carcass_results[,1];
sex_bias_carcass <- cbind(carcass,OR,qval);
sex_bias_gonads_results <- t(apply(as.matrix(cbind(
gonads[,2]+gonads[,3]+gonads[,4],
gonads[,6]+gonads[,7]+gonads[,8],
rep(sum(gonads[,2:4],nrow(gonads))),
rep(sum(gonads[,6:8],nrow(gonads))))),
1,FET_misexpression));
qval <- p.adjust(sex_bias_gonads_results[,4],method="fdr");
OR <- sex_bias_gonads_results[,1];
sex_bias_gonads <- cbind(gonads,OR,qval);
###################################
# sex-biased genes by fold cutoff #
###################################
fold <- 2;
# carcass
fold_change <- log(((carcass[,2]+carcass[,3]+carcass[,4])/sum(carcass[,2:4]))/((carcass[,6]+carcass[,7]+carcass[,8])/sum(carcass[,6:8])),base=fold);
sex_bias_carcass <- cbind(carcass,fold_change);
MBG <- merge(sex_bias_carcass[which(sex_bias_carcass$fold_change > 1),],list,by.x="gene",by.y="gene");
FBG <- merge(sex_bias_carcass[which(sex_bias_carcass$fold_change < -1),],list,by.x="gene",by.y="gene");
not <- merge(sex_bias_carcass[which(sex_bias_carcass$fold_change >= -1 & sex_bias_carcass$fold_change <= 1),],list,by.x="gene",by.y="gene");
write.table(MBG[,1:10],file="/Users/kraigrs/Wittkopp/Machado_Dpse/fold_change_carcass_MBG.txt",quote=FALSE,sep="\t",row.names=FALSE);
write.table(FBG[,1:10],file="/Users/kraigrs/Wittkopp/Machado_Dpse/fold_change_carcass_FBG.txt",quote=FALSE,sep="\t",row.names=FALSE);
fisher.test(rbind(create_table(MBG),create_table(not)),workspace=100000000);
fisher.test(rbind(create_table(FBG),create_table(not)),workspace=100000000);
# gonads
fold_change <- log(((gonads[,2]+gonads[,3]+gonads[,4])/sum(gonads[,2:4]))/((gonads[,6]+gonads[,7]+gonads[,8])/sum(gonads[,6:8])),base=fold);
sex_bias_gonads <- cbind(gonads,fold_change);
MBG <- merge(sex_bias_gonads[which(sex_bias_gonads$fold_change > 1),],list,by.x="gene",by.y="gene");
FBG <- merge(sex_bias_gonads[which(sex_bias_gonads$fold_change < -1),],list,by.x="gene",by.y="gene");
not <- merge(sex_bias_gonads[which(sex_bias_gonads$fold_change >= -1 & sex_bias_gonads$fold_change <= 1),],list,by.x="gene",by.y="gene");
write.table(MBG[,1:10],file="/Users/kraigrs/Wittkopp/Machado_Dpse/fold_change_gonads_MBG.txt",quote=FALSE,sep="\t",row.names=FALSE);
write.table(FBG[,1:10],file="/Users/kraigrs/Wittkopp/Machado_Dpse/fold_change_gonads_FBG.txt",quote=FALSE,sep="\t",row.names=FALSE);
fisher.test(rbind(create_table(MBG),create_table(not)),workspace=100000000);
fisher.test(rbind(create_table(FBG),create_table(not)),workspace=100000000);
##################################
# find sex tissue-specific genes #
##################################
i <- testes$gene %in% gonads$gene;
MBG <- merge(testes[!i,],list,by.x="gene",by.y="gene");
j <- ovaries$gene %in% gonads$gene;
FBG <- merge(ovaries[!j,],list,by.x="gene",by.y="gene");
k <- MBG$gene %in% M_carcass$gene;
testis_specific <- MBG[!k,];
l <- FBG$gene %in% F_carcass$gene;
ovary_specific <- FBG[!l,];
write.table(testis_specific[,1:5],file="/Users/kraigrs/Wittkopp/Machado_Dpse/testis_specific.txt",quote=FALSE,sep="\t",row.names=FALSE);
write.table(ovary_specific[,1:5],file="/Users/kraigrs/Wittkopp/Machado_Dpse/ovary_specific.txt",quote=FALSE,sep="\t",row.names=FALSE);
plot(log2(gonads$par1.testes + gonads$par2.testes + gonads$hyb.testes),
log2(gonads$par1.ovaries + gonads$par2.ovaries + gonads$hyb.ovaries),
xlim=c(log2(60),log2(400000)),ylim=c(log2(60),log2(400000)),
xlab="log2(testes)",ylab="log2(ovaries)",
pch=16,col=rgb(0,0,0,0.5),cex=0.5);
abline(a=0,b=1,col="red");
plot((log2(gonads$par1.testes+gonads$par2.testes+gonads$hyb.testes)+log2(gonads$par1.ovaries+gonads$par2.ovaries+gonads$hyb.ovaries))/2,
log2((gonads$par1.testes+gonads$par2.testes+gonads$hyb.testes)/(gonads$par1.ovaries+gonads$par2.ovaries+gonads$hyb.ovaries)),
ylim=c(-10,10),xlab="avg. log2(expression)",ylab="log2(testes/ovaries)",
pch=16,col=rgb(0,0,0,0.5),cex=0.5);
|
96d22a317241ab81df7fb9dce6764f0a46abc1dc | 59c51287120be281e0e5a155e122e8931508bc4a | /man/firingRateMapAutosPlot.Rd | 3663fb26c5f7a4c3a713d8c35c13c0dfb0ab829f | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | kevin-allen/relectro | 2a43159f3f2f08cf7c09168c69b90e1e3b51dc55 | b2fb35c49f8c46d1519f502e9cb01c6c9f2cb0a8 | refs/heads/master | 2020-04-12T08:05:01.663420 | 2019-03-18T21:22:11 | 2019-03-18T21:22:11 | 57,366,781 | 8 | 8 | null | 2018-08-02T14:16:57 | 2016-04-29T08:00:25 | R | UTF-8 | R | false | true | 584 | rd | firingRateMapAutosPlot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PlotsSpatial.R
\name{firingRateMapAutosPlot}
\alias{firingRateMapAutosPlot}
\title{Plot a several spatial autocorrelation maps on the same page}
\usage{
firingRateMapAutosPlot(maps, names, ncol = 5, nrow = 6)
}
\arguments{
\item{maps}{A 3d array containing maps (x,y,clu)}
\item{names}{A character vector containing the name of each map in the array}
\item{ncol}{Number of columns of plots per page}
\item{nrow}{Number of rows of plots per page}
}
\description{
This is not currently being developed.
}
|
65f44a44efb222a81eda3ba7de2700ee8f58d740 | 364c6bafaa9529fcf15851f859fdbc6cc45f7fc5 | /r_package/AlloHubMat/R/plot_sectors.R | 8ac158af4d0687e8875793c4861fe0e46823f249 | [] | no_license | jamieAmacpherson/allosteric_signal | 842a5b446924a5b84b2aff589bcb6f51beabc8a1 | 297ef7cd79748d864a2a8f12ff5924064ebd4ed2 | refs/heads/master | 2021-10-12T04:49:27.252794 | 2019-02-01T18:13:05 | 2019-02-01T18:13:05 | 95,106,128 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,821 | r | plot_sectors.R | #===============================================================================
# AlloHubMat
#' 2-dimensional plot of the covariance overlap between contiguous mutual
#' information matrices defined in the input trajectory.
#'
#' Draws the diagonal of the smoothed covariance-overlap matrix (produced by
#' matrix_smooth()) as a line-and-point series against simulation time.
#'
#' @param overlap.matrix.s Smoothed matrix of covariance overlaps, computed using matrix_smooth().
#' @param t.traj Length of the MD simulation, in nanoseconds.
#' @return A plot
#' @export
#===============================================================================
sectors_2dplot <- function(overlap.matrix.s, t.traj) {
  # Diagonal of the smoothed overlap matrix: one overlap value per window
  overlap_diag <- diag(overlap.matrix.s$z)
  # Evenly spaced time points spanning the full trajectory length
  time_axis <- seq(from = 0, to = t.traj, length.out = length(overlap_diag))
  # Wider left/bottom margins for the enlarged axis labels
  par(mar = c(5, 5, 2, 2))
  plot(cbind(time_axis, overlap_diag),
       pch = 1,
       cex = 2,
       xlab = 'Time (ns)',
       ylab = expression(paste(Omega['A;B'])),
       panel.first = grid(lwd = 2),
       cex.axis = 2,
       cex.lab = 2,
       type = 'o')
}
#===============================================================================
# AlloHubMat
#' Image plot of the covariance overlap between all mutual information
#' matrices defined in the input trajectory.
#'
#' Renders the smoothed covariance-overlap matrix produced by matrix_smooth()
#' as a colour-coded image with both axes in simulation time.
#'
#' @param overlap.matrix.s Smoothed matrix of covariance overlaps, computed using matrix_smooth().
#' @param t.traj Length of the MD simulation, in nanoseconds.
#' @return An image plot
#' @export
#===============================================================================
sectors_3dplot <- function(overlap.matrix.s, t.traj) {
  # One time point per column of the overlap matrix, spanning [0, t.traj]
  time_axis <- seq(from = 0, to = t.traj, length.out = ncol(overlap.matrix.s$z))
  # Square margins, then draw the matrix with a colour legend
  par(mar = c(5, 5, 2, 2))
  fields::image.plot(time_axis, time_axis, overlap.matrix.s$z,
                     legend.cex = 2,
                     cex = 2,
                     xlab = "Time (ns)",
                     ylab = "Time (ns)")
}
#===============================================================================
# AlloHubMat
#' Plot the frequency distribution of the ergodic sector mutual information.
#'
#' This function generates a frequency distribution plot of the mutual
#' information contained by the ergodic sectors identified by
#' extract_sectors(), and calculates a significance cutoff
#' (mean + n.sigmas * SD) to distinguish significant correlations from noise.
#'
#' @param sector.list A list of matrices, each an averaged mutual information matrix for an ergodic sector
#' @param n.sigmas An integer number of standard deviations used in calculating the mutual information significance threshold (default 2)
#' @return A density histogram plot of the mutual information of the ergodic sectors
#' @export
#===============================================================================
sectors_MIfreq = function(sector.list, n.sigmas = 2){
  # BUG FIX: the documented default of 2 is now a real default argument.
  # Previously n.sigmas had no default, so sectors_MIfreq(x) failed with
  # "argument 'n.sigmas' is missing" before the is.null() guard could run.
  # The guard is kept so an explicit NULL still falls back to 2.
  if(is.null(n.sigmas)){
    n.sigmas = 2
  }
  # Element-wise average over the list of sector matrices; a single ergodic
  # sector is used as-is.
  if(length(sector.list) > 1){
    mat = Reduce("+", sector.list) / length(sector.list)
  } else {
    mat = sector.list[[1]]
  }
  # Significance threshold: mean + n.sigmas standard deviations of the
  # averaged mutual information matrix
  mu.mat = mean(mat)
  sig.mat = sd(mat)
  threshold = mu.mat + (n.sigmas*sig.mat)
  # Histogram of the averaged mutual information (computed, not drawn)
  mathist = hist(mat,
                 plot=FALSE,
                 breaks='Freedman-Diaconis')
  # Draw the frequency distribution as vertical bars
  par(mar = c(5,5,2,2))
  plot(mathist$breaks[-1],
       mathist$counts,
       type='h',
       xlab = "nMI",
       ylab = expression(italic(f(nMI))),
       cex.lab = 2,
       cex.axis = 2,
       panel.first = grid())
  # Dashed red line marking the significance threshold
  abline(v=threshold,
         lty=2,
         col="red",
         lwd=2)
  # Annotate the cutoff value next to the threshold line
  text(threshold + 0.3, (max(mathist$counts) / 2),
       paste("nMI > ", round(threshold, 3)),
       cex=2)
}
#===============================================================================
|
b8e32433dc5aa372c2fb5ba66484a44292a983d3 | 07383de953a20062dd8ae10ec505fb8a5a3c0467 | /models/safe_transformation.R | 06ca90d9a934fd1c56bd89a7223fd6741d12578e | [] | no_license | kozaka93/SAFE_use_case | 098b3b2c4b0d0377b087f1f10baaa37f1ca91117 | 0a4299a5adc2ae1fa600eaf354c51c28d4f9064c | refs/heads/master | 2022-11-18T19:11:12.628667 | 2020-07-15T20:11:28 | 2020-07-15T20:11:28 | 279,329,294 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 468 | r | safe_transformation.R | #### SAFE TRANSFORMATION ####
# NOTE(review): installing a package at the top of an analysis script triggers
# a network install on every run; consider guarding with
# requireNamespace("rSAFE") or moving the install to setup docs.
devtools::install_github("ModelOriented/rSAFE")
library(DALEX)
library(DALEXtra)
library(rSAFE)
# Build a DALEXtra explainer around the mlr GBM model. 'mod_gbm' and 'train'
# are not defined in this script -- presumably created by an earlier
# model-fitting step; TODO confirm which script provides them.
# Response is binary: TRUE when the observed class is "bad".
explainer_gbm <- explain_mlr(mod_gbm,
                             data = train,
                             y = train$class == "bad")
# Extract SAFE feature transformations from partial-dependence profiles
safe_extractor_gbm <- safe_extraction(explainer_gbm,
                                      response_type = "pdp")
# Persisting the extractor is intentionally left disabled
#save(safe_extractor_gbm, file = "./models/safe_extractor_gbm_german_credit_split4.rda")
19e6b2ccaf323dbf1cc9e109fd0a189c681802fc | 796115a24cc8fcd0f0f8cb5c5982e6f55e48c16e | /shiny_app/ui.R | 56721c7d5e196290f3e0bada2c81542b97d1ad49 | [] | no_license | lsauchanka/Project-3 | 4c456617bfadbf75d8db573c9d0d699d33b09eee | d09bc87b0ddf6c3999cfe64855ed5b2c4d4f6741 | refs/heads/main | 2023-05-25T09:56:08.374426 | 2021-06-08T23:23:02 | 2021-06-08T23:23:02 | 375,131,096 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 309 | r | ui.R |
# Shiny UI definition: a page with a year selector in the sidebar and a
# single plot in the main panel.
fluidPage(
  titlePanel("Posts by Year"),
  sidebarLayout(
    sidebarPanel(
      # Year dropdown; 'PostsbyMonth' is not defined in this file and must
      # be in scope when the UI is built (presumably loaded in global.R)
      # -- TODO confirm where it is created.
      selectInput("year", "Year:",
                  choices=colnames(PostsbyMonth)),
      hr(),
      helpText("Data from gaming.stackexchange.com")
    ),
    mainPanel(
      # Filled by output$plot on the server side
      plotOutput("plot")
    )
  )
)
bb3a8920b5ec0ff5950f0d9650766bf33ebfcf40 | b821cea9ea90e31bfe72c8ed718a68a5e4e2dd6d | /Tree.R | 7d2717fa5bb9f863588d16bf8fc2d99122bbb890 | [] | no_license | christyChenYa/GoogleSearchQueries | 942db1d5ad47c377e33dd8456172f81db2f0c682 | c29ce211b0c2a51fdc2e5ad3536c9003947d7c2b | refs/heads/master | 2020-04-12T05:49:16.738076 | 2019-01-11T00:32:31 | 2019-01-11T00:32:31 | 162,332,809 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,345 | r | Tree.R | #treelab
# Decision-tree lab: derive query/url features and fit a classification
# tree predicting search-result relevance.
getwd()
# NOTE(review): hard-coded absolute setwd() makes the script non-portable.
setwd("/Users/Christy/Desktop/Data Mining ")
training<-read.csv("training.csv")
test=read.csv("test.csv")
######################### convert to data.table ######################################
# NOTE(review): data.table() is used but library(data.table) is never loaded
# in this script -- confirm it is attached elsewhere.
training<-data.table(training)
test<-data.table(test)
######################### check data ################################################
head(training,n = 20)
str(training)
######################### Generate new features ######################################
# Per-query and per-url aggregate features added by reference (:=)
training<-training[,url_per_query:= .N,by='query_id']
training<-training[,url_median:=as.integer(median(url_id)),by='query_id']
training<-training[,url_distance:=log(1+abs(url_id-url_median))]
training<-training[,url_appearance:= .N,by='url_id']
# Leave-one-out success count for each url
training<-training[,url_otherSuccess:= sum(relevance)-relevance,by='url_id']
training<-training[,url_otherSRatio:= url_otherSuccess/url_appearance]
# Within-query ranks of the eight signal columns
training[,c('sig1_rk','sig2_rk','sig3_rk','sig4_rk','sig5_rk','sig6_rk','sig7_rk','sig8_rk'):=
           list(rank(sig1),rank(sig2),rank(sig3),rank(sig4),rank(sig5),rank(sig6),rank(sig7),rank(sig8)),by='query_id']
######################### prepare for learning ######################################
train_feature<-training[,c(3:12,14:25,27)]
train_target<-training[,13]
library(tree)
#error.rate=c(1:10)
#k=10
#folds=sample(1:k,nrow(training),replace=TRUE)
# Binary target: "N" when relevance is 0, "Y" otherwise
High=ifelse(training$relevance==0,"N","Y")
training=data.frame(training,High)
training.size <- dim(training)[1]
# NOTE(review): this overwrites the 'test' data read above with an index
# vector -- the hold-out indices (10% of rows).
test = sample(1:training.size,training.size/10)
train=-test
training.test = training[test,]
training.train = training[-test,]
#for(i in 1:k){
set.seed(1)
#train=training[folds!=i,]
#test=training[folds==i,]
#High.test=High[folds==i]
tree.train=tree(High~.,data=training.test)
summary(tree.train)
tree.pred=predict(tree.train,training.test,type="class")
# NOTE(review): 'High.trainingtest' is never defined in this script --
# presumably training.test$High was intended; verify before running.
x=table(tree.pred,High.trainingtest)
# NOTE(review): 'test' is an index vector here, so dim(test)[1] is NULL;
# length(test) was likely intended.
error.rate=(x[1,2]+x[2,1])/dim(test)[1]
#}
mean(error.rate)
bestind = which.min(error.rate)
bestind
set.seed(1)
# NOTE(review): 'folds' only exists in the commented-out CV code above, so
# this line will fail as written.
tree.best=tree(High~., data=training[folds==bestind,])
summary(tree.best)
###################CV PLOT########################
# NOTE(review): 'rf.train1' is not defined in this script -- possibly
# tree.train was intended; also which.min(cv.relevance) is applied to the
# whole cv object rather than cv.relevance$dev. Verify both.
cv.relevance <- cv.tree(rf.train1)
plot(cv.relevance$size, cv.relevance$dev, type = "b")
tree.min <- which.min(cv.relevance)
points(cv.relevance$size[tree.min], cv.relevance$dev[tree.min], col="red", cex =2, pch=20)
|
0ff42ff7b52ef7e0f636ec528529139017a02858 | 125dbf3c28b775d115e490d456b3c457346d4127 | /IPM_02/IPM_02.2-5.R | fd0fe87559a077fc95b96cf6797bf6898c1bef06 | [] | no_license | norberello/IPM_code | aa53648b011c1e9105bdd81322a7851b81a4e5f3 | fe6d85858e5c37f1ae9ad1d24525208c964b9e28 | refs/heads/main | 2023-08-31T14:36:08.102940 | 2021-10-12T07:48:58 | 2021-10-12T07:48:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,095 | r | IPM_02.2-5.R | # Schaub & Kéry (2022) Integrated Population Models
# Chapter 2 : Bayesian statistical modeling using JAGS
# ----------------------------------------------------
# Code from proofs.
library(IPMbook)
# 2.2 Parametric statistical modeling
# ===================================
# 2.2.2 Parametric statistical models for inference about chance processes
# ------------------------------------------------------------------------
# Read the data for the tadpole experiment into R
N <- 50 # Number of tadpoles released initially
y <- 20 # Number of tadpoles detected later
# 2.3 Maximum likelihood estimation in a nutshell
# ===============================================
# Fig. 2.3
all.possible <- 0:50 # All possible values
pmf1 <- dbinom(all.possible, size=50, prob=0.1) # pmf 1
pmf2 <- dbinom(all.possible, size=50, prob=0.5) # pmf 2
pmf3 <- dbinom(all.possible, size=50, prob=0.9) # pmf 3
# ~~~~ additional code for the plot ~~~~
op <- par(mfrow=c(1, 3), mar=c(5, 5, 4, 1), cex.lab=1.5, cex.axis=1.5, cex.main=2, las=1)
plot(all.possible, pmf1, type="h", lend="butt", lwd=3, frame=FALSE,
xlab="Counts (y)", ylab="Probability of y", main=expression(paste(theta, " = 0.1")))
abline(v=20, col="blue", lwd=2)
plot(all.possible, pmf2, type="h", lend="butt", lwd=3, frame=FALSE,
xlab="Counts (y)", ylab="", main=expression(paste(theta, " = 0.5")))
abline(v=20, col="blue", lwd=2)
plot(all.possible, pmf3, type="h", lend="butt", lwd=3, frame=FALSE,
xlab="Counts (y)", ylab="", main=expression(paste(theta, " = 0.9")))
abline(v=20, col="blue", lwd=2)
par(op)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Use RNG to obtain binomial density for pmf 3
hist(rbinom(10^6, size=50, prob=0.9), freq=FALSE, xlim=c(0, 50)) # Not shown
# Brute-force search for MLE of theta for the tadpole data set (Fig. 2.4)
try.theta <- seq(0, 1, by=0.01) # Values to try out
like <- dbinom(20, 50, try.theta, log=FALSE) # Likelihood
loglike <- dbinom(20, 50, try.theta, log=TRUE) # Log-Likelihood
negloglike <- -dbinom(20, 50, try.theta, log=TRUE) # NLL
op <- par(mfrow=c(1, 3), mar=c(5, 5, 4, 1), cex.lab=1.5, cex.axis=1.5)
plot(x=try.theta, y=like, xlab=expression(paste("Detection probability (", theta, ")")),
ylab="Likelihood", frame=FALSE, type="p", pch=16, col="black")
abline(v=try.theta[which(like == max(like))], col="red", lwd=3)
plot(x=try.theta, y=loglike, xlab=expression(paste("Detection probability (", theta, ")")),
ylab="Log-Likelihood", frame=FALSE, type="p", pch=16, col="black")
abline(v=try.theta[which(loglike == max(loglike))], col="red", lwd=3)
plot(x=try.theta, y=negloglike, xlab=expression(paste("Detection probability (", theta, ")")),
ylab="Negative log-Likelihood", frame=FALSE, type="p", pch=16, col="black")
abline(v=try.theta[which(negloglike == min(negloglike))], col="red", lwd=3)
par(op)
# 2.4 Bayesian inference
# ======================
theta.vals <- seq(0, 1, 0.001)
# Define likelihood function (same for all four analyses)
like <- dbinom(20, 50, theta.vals)
# Define four prior distributions
prior0 <- dbeta(theta.vals, 1, 1)
prior1 <- dbeta(theta.vals, 4, 6)
prior2 <- dbeta(theta.vals, 40, 60)
prior3 <- dbeta(theta.vals, 60, 40)
# Derive four posterior distributions
post0 <- dbeta(theta.vals, 20 + 1, 30 + 1)
post1 <- dbeta(theta.vals, 20 + 4, 30 + 6)
post2 <- dbeta(theta.vals, 20 + 40, 30 + 60)
post3 <- dbeta(theta.vals, 20 + 60, 30 + 40)
# ~~~~ additional code for plotting Fig 2.5 ~~~~
sc.like <- like * (50 + 1) # Scale likelihood. Multiplied by n + 1
# because the area under the curve for trial size 1 is 1/(n + 1).
library(scales)
co <- viridis_pal(option="E")(20)[c(18, 11, 2)]
lwd <- 3; cx <- 1.5
op <- par(mfrow=c(2, 2), mar=c(5, 5, 4, 2), cex.axis=cx, cex.lab=cx, cex.main=cx)
# Analysis 1 with vague prior
plot(theta.vals, post0, type ="l", col=co[3], xlab="",
ylab="Scaled likelihood or density", las=1, frame=FALSE, lwd=2, ylim=c(0, 10))
mtext("Vague prior", side=3, line=0.5, font=2)
lines(theta.vals, sc.like, lty=2, lwd=2, col=co[2])
lines(theta.vals, prior0, lwd=2, col=co[1])
legend(0.5, 10, c("Prior dist.", "Likelihood function", "Posterior dist."),
col=co, lty=1, lwd=2, bty="n")
# Analysis 2 with informative prior 1
plot(theta.vals, post1, type="l", lwd=2, col=co[3], xlab="", ylab="", las=1,
frame=FALSE, ylim=c(0, 10))
mtext("Informative prior 1", side=3, line=0.5, font=2)
lines(theta.vals, sc.like, lwd=2, col=co[2])
lines(theta.vals, prior1, lty=1, lwd=2, col=co[1])
# Analysis 3 with informative prior 2
plot(theta.vals, post2, type="l", lwd=2, col=co[3], xlab= expression(theta),
ylab="Scaled likelihood or density", las=1, frame=FALSE, ylim=c(0, 10))
mtext("Informative prior 2", side=3, line=0.5, font=2)
lines(theta.vals, sc.like, lwd=2, col=co[2])
lines(theta.vals, prior2, lty=1, lwd=2, col=co[1])
# Analysis 4 with informative prior 3
plot(theta.vals, post3, type="l", lwd=2, col=co[3], xlab= expression(theta),
ylab='', las=1, frame=FALSE, ylim=c(0, 10))
mtext("Informative prior 3", side=3, line=0.5, font=2)
lines(theta.vals, sc.like, lwd=2, col=co[2])
lines(theta.vals, prior3, lty=1, lwd= 2, col=co[1])
par(op)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~ additional code for Fig 2.6 ~~~~
# How information in the data changes our state of knowledge
co <- c("grey92", "grey60", "black")
xylim <- 0:1
# plot(theta.vals, sc.prior1, type="l", lwd=5, col=co[1], xlab=expression(theta),
plot(theta.vals, prior0, type="l", lwd=5, col=co[1], xlab=expression(theta),
ylab="", xlim=xylim, ylim=xylim, las=1, frame=FALSE, axes=FALSE)
axis(1)
sc.post1 <- post1/max(post1)
lines(theta.vals, sc.post1, col=co[2], lwd=5)
abline(v=0.4, col=co[3], lwd=5)
legend(0.6, 0.9, c("Complete ignorance", "Improved state of knowledge",
"Certainty (fixed parameter)"), col=co, lty=1, lwd=5, bty="n")
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 2.5 Bayesian computation
# ========================
# Choose initial value for logit(theta) and tuning parameters
ltheta1 <- 1 # Initial value for tadpole detection prob.
sigma_prop <- 1 # SD of Gaussian proposal distribution
# Array to hold the MCMC samples
ltheta <- numeric()
# Initial value becomes first (and 'current') value in the chain
ltheta[1] <- ltheta1
ltheta # Our posterior sample up to now (not shown)
# Randomly perturb the current value
set.seed(1) # To initalize your RNGs identically to ours
( ltheta_star <- rnorm(1, ltheta[1], sigma_prop) )
# [1] 0.3735462
# Compute likelihood times prior evaluated for the proposed new value of ltheta
( pd_star <- dbinom(20, 50, plogis(ltheta_star)) * dbeta(plogis(ltheta_star), 1, 1) )
# [1] 0.002716919
# Compute likelihood times prior evaluated for the current value of ltheta
( pd_1 <- dbinom(20, 50, plogis(ltheta[1])) * dbeta(plogis(ltheta[1]), 1, 1) )
# [1] 6.951277e-07
# Compute posterior density ratio R
( R <- pd_star / pd_1 )
# [1] 3908.518
# Add theta_star into MCMC sample
ltheta[2] <- ltheta_star
ltheta # Our posterior sample up to now (not shown)
( ltheta_star <- rnorm(1, ltheta[2], sigma_prop) )
# [1] 0.5571895
pd_star <- dbinom(20, 50, plogis(ltheta_star)) * dbeta(plogis(ltheta_star), 1, 1)
pd_t <- dbinom(20, 50, plogis(ltheta[2])) * dbeta(plogis(ltheta[2]), 1, 1)
( R <- pd_star / pd_t )
# [1] 0.1398872
( keep.ltheta_star <- rbinom(1, 1, R) )
# [1] 0
ltheta[3] <- ltheta[2]
ltheta # Our posterior sample up to now (not shown)
# Iteration 4 to T of RW-MH algorithm
T <- 60000 # Choose chain length
for (t in 4:T){ # Continue where we left off
ltheta_star <- rnorm(1, ltheta[t-1], sigma_prop)
pd_star <- dbinom(20, 50, plogis(ltheta_star)) * dbeta(plogis(ltheta_star), 1, 1)
pd_t <- dbinom(20, 50, plogis(ltheta[t-1])) * dbeta(plogis(ltheta[t-1]), 1, 1)
R <- min(1, pd_star / pd_t) # Note more general solution here
keep.ltheta_star <- rbinom(1, 1, R)
ltheta[t] <- ifelse(keep.ltheta_star == 1, ltheta_star, ltheta[t-1])
}
# ~~~~ code for Fig. 2.7 (left) ~~~~
op <- par(mfrow=c(1, 3), mar=c(6, 7, 6, 3), cex.lab=2, cex.axis=2, cex.main=2, las=1)
plot(1:10, plogis(ltheta[1:10]), xlab='Iteration', ylab=expression(theta),
type='l', frame=FALSE, lwd=3, main='First ten iterations')
abline(h=0.4, col='red', lwd=2) # The maximum likelihood estimate
abline(h=mean(plogis(ltheta[1:10])), lty=2, col='blue', lwd=2)
# Update trace-plot of time series of posterior draws (Fig. 2-7 middle)
plot(1:T, plogis(ltheta), xlab='Iteration', ylab= expression(theta), type='l',
frame=FALSE, lwd=1, main='All iterations')
abline(h=0.4, col='red', lwd=3) # The maximum likelihood estimate
abline(h=mean(plogis(ltheta)), lty=2, col='blue', lwd=3)
# Plot histogram of posterior samples of tadpole detection probability
# Fig. 2-7 right
hist(plogis(ltheta), breaks=50, col='lightgray', xlab=expression(theta),
main=expression(bold(paste('Posterior distribution of ', theta))), border=NA)
abline(v=0.4, col='red', lwd=3) # The maximum likelihood estimate
abline(v=mean(plogis(ltheta)), lty=2, col='blue', lwd=3) # Posterior mean
par(op)
# ~~~~~~~~~~~~~~~~~~~~~
library(IPMbook)
out <- demoMCMC(y=20, N=50, niter=25000, mu.ltheta=0, sd.ltheta=100, prop.sd=1, init=0)
# produces Fig 2.8
# Show convergence
tmp <- demoMCMC(y=20, N=50, niter=2500, mu.ltheta=0, sd.ltheta=100, prop.sd=0.1, init=10)
# No convergence within 2500 iterations
tmp <- demoMCMC(y=20, N=50, niter=2500, mu.ltheta=0, sd.ltheta=100, prop.sd=0.1, init=100)
# But convergence is reached after about 3k iterations
tmp <- demoMCMC(y=20, N=50, niter=25000, mu.ltheta=0, sd.ltheta=100, prop.sd=0.1, init=100)
# ... and you get convergence within 2500 iters with longer step length
tmp <- demoMCMC(y=20, N=50, niter=2500, mu.ltheta=0, sd.ltheta=100, prop.sd=1, init=100)
# ~~~~ extra code to explore step size ~~~~
# Very, very small step size: very inefficient MCMC
str(out <- demoMCMC(prop.s = 0.01))
# Very small step size: fairly inefficient
str(out <- demoMCMC(prop.s = 0.1))
# Larger than default step size: efficiency goes down
str(out <- demoMCMC(prop.s = 10))
# Much larger step size..... brrrrr!
str(out <- demoMCMC(prop.s = 100))
# Brutally large step size..... ACH!
str(out <- demoMCMC(prop.s = 1000))
# Default step size: pretty good for this case
str(out <- demoMCMC(prop.s = 1))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
str(out)
# List of 7
# $ y : num 20
# $ N : num 50
# $ mu.ltheta: num 0
# $ sd.ltheta: num 100
# $ prop.sd : num 1
# $ ltheta : num [1:25000] 0 0 -0.0519 -0.0519 -0.7637 ...
# $ acc.prob : num 0.34
# Measures of central tendency to serve as point estimates
library(MCMCglmm) # For function for mode
mean(plogis(out$ltheta)) # Posterior mean
# [1] 0.4007489 # Your result will differ slightly
median(plogis(out$ltheta)) # Posterior median
# [1] 0.4001582 # Your result will differ
posterior.mode(mcmc(plogis(out$ltheta))) # Posterior mode (ditto)
# var1
# 0.4152145
# Measures of spread:
# - Bayesian 'variant' of standard error (= posterior SD)
# - two Bayesian credible intervals (CRI and HPDI)
sd(plogis(out$ltheta)) # Posterior SD
# [1] 0.06950494 # Your result will differ
quantile(plogis(out$ltheta), prob=c(0.025, 0.975)) # Symmetrical Bayesian CRI (your result will differ)
# 2.5% 97.5%
# 0.2705029 0.5383629
HPDinterval(mcmc(plogis(out$ltheta))) # Highest posterior density credible
# interval(HPDI); your result will differ
# lower upper
# var1 0.2672082 0.5349586
# attr(,"Probability")
# [1] 0.95
# Compute p(theta > 0.5)
mean(plogis(out$ltheta) > 0.5)
# [1] 0.07584
|
8e353672ec58716dbc4adc8b62f53d99ad6a5ba2 | e235bfe1d784b6046a9411d5e2c4df3d0b61f34f | /data-raw/save_gss.R | 5675f2a3c4e9066b362a037d134e8edfe18b4d78 | [
"MIT"
] | permissive | tidymodels/infer | 040264d3b295c6986a9141d3d6fffe8a51e73db0 | 6854b6e5f8b356d4b518c2ca198cc97e66cd4fcb | refs/heads/main | 2023-08-07T03:34:31.100601 | 2023-07-27T21:21:58 | 2023-07-27T21:21:58 | 93,430,707 | 517 | 74 | NOASSERTION | 2023-09-06T12:46:24 | 2017-06-05T17:41:42 | R | UTF-8 | R | false | false | 2,453 | r | save_gss.R | library(dplyr)
library(forcats)
library(srvyr)
library(ggplot2)
# pull gss data
temp <- tempfile()
download.file("https://gss.norc.org/documents/stata/GSS_stata.zip",temp)
# if this next line errors with "No such file or directory", try
# incrementing the number after "_R"
gss_orig <- haven::read_dta(unz(temp, filename = "GSS7218_R2.DTA")) %>%
haven::as_factor()
unlink(temp)
# select relevant columns
gss_small <- gss_orig %>%
filter(!stringr::str_detect(sample, "blk oversamp")) %>% # this is for weighting
select(year, age, sex, college = degree, partyid, hompop,
hours = hrs1, income, class, finrela, weight = wtssall) %>%
mutate_if(is.factor, ~fct_collapse(., NULL = c("IAP", "NA", "iap", "na"))) %>%
mutate(age = age %>%
fct_recode("89" = "89 or older",
NULL = "DK") %>% # truncated at 89
as.character() %>%
as.numeric(),
hompop = hompop %>%
fct_collapse(NULL = c("DK")) %>%
as.character() %>%
as.numeric(),
hours = hours %>%
fct_recode("89" = "89+ hrs",
NULL = "DK") %>% # truncated at 89
as.character() %>%
as.numeric(),
weight = weight %>%
as.character() %>%
as.numeric(),
partyid = fct_collapse(partyid,
dem = c("strong democrat", "not str democrat"),
rep = c("strong republican", "not str republican"),
ind = c("ind,near dem", "independent", "ind,near rep"),
other = "other party"
),
income = factor(income, ordered = TRUE),
college = fct_collapse(college,
degree = c("junior college", "bachelor", "graduate"),
"no degree" = c("lt high school", "high school"),
NULL = "dk" # no dks show up in the data, so drop this level
)
)
# Sample 500 rows after dropping NAs (an earlier version of this comment
# said 3k; the code below takes 500).
set.seed(20200201)
gss <- gss_small %>%
  drop_na() %>%
  sample_n(500)
# Check that the sample is similar unweighted to weighted.
# NOTE(review): 'unweighted' and 'weighted' below are never compared
# programmatically -- presumably inspected interactively. Also drop_na()
# comes from tidyr, which is not attached by the library() calls above;
# confirm it is available.
gss_wt <- srvyr::as_survey_design(gss, weights = weight)
unweighted <- gss %>%
  group_by(year, sex, partyid) %>%
  summarize(n = n()) %>%
  ungroup() %>%
  group_by(year, sex) %>%
  mutate(prop = n / sum(n))
weighted <- gss_wt %>%
  group_by(year, sex, partyid) %>%
  summarize(prop = srvyr::survey_mean())
# Save the sampled data into the package and refresh documentation
usethis::use_data(gss, overwrite = TRUE)
devtools::document()
|
7159401a7de2c9ba303610e57b526b9fa7c046b7 | a8b4f18d326a8579cf038ecc13cc2e96b491571d | /Day_3_mapping_with_style.R | ed4722bcc991f5325bf000768a90be14ba80e319 | [] | no_license | gayiza/intro_r_uwc | f99a28a3be1674289da2e2b5d6fb2651481c4960 | dd0f4ebda09d80c34f4bf0caf46111e26c876721 | refs/heads/master | 2020-04-19T09:20:06.426141 | 2019-02-03T07:19:31 | 2019-02-03T07:19:31 | 168,107,217 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,467 | r | Day_3_mapping_with_style.R | #mapping with styke
#Day3 - mapping South Africa with ggplot2/ggsn
#Mihle Gayiza
#31st Jan 2019
# Load libraries
library(tidyverse)
library(scales)
library(ggsn)
#Default map (world map)
ggplot() +
borders() + # The global shape file
coord_equal() # Equal sizing for lon/lat
#Better view of SA: restrict the longitude/latitude window to South Africa
sa_1 <- ggplot() +
borders(fill = "grey70", colour = "black") +
coord_equal(xlim = c(12, 36), ylim = c(-38, -22), expand = 0) # Force lon/lat extent
sa_1
#Map with labelling: annotate the two oceans, rotated to follow the coast
sa_2 <- sa_1 +
annotate("text", label = "Atlantic\nOcean",
x = 15.1, y = -32.0,
size = 5.0,
angle = 30,
colour = "coral1") +
annotate("text", label = "Indian\nOcean", # "\n" wraps the label onto a second line
x = 33.2, y = -34.2,
size = 5.0,
angle = 330,
colour = "lawngreen")
sa_2
#Map of SA with scale bar and north arrow
sa_3 <- sa_2 +
scalebar(x.min = 22, x.max = 26, y.min = -36, y.max = -35, # Set location of bar
dist = 200, height = 1, st.dist = 0.8, st.size = 4, # Set particulars
dd2km = TRUE, model = "WGS84") + # Set appearance
north(x.min = 22.5, x.max = 25.5, y.min = -33, y.max = -31, # Set location of symbol
scale = 1.2, symbol = 16)
sa_3
#inserting maps: place an inset map of Africa on top of the SA map
# NOTE(review): africa_map is not created anywhere in this script - it is
# presumably built in an earlier exercise; confirm it exists before running.
sa_4 <- sa_3 +
annotation_custom(grob = ggplotGrob(africa_map),
xmin = 20.9, xmax = 26.9,
ymin = -30, ymax = -24)
sa_4
# [A.A]
# Neat script. Add more self-made comments throughout the document. |
f46349613aeb51baeaa1abfc662273f79b228dec | f5ce7a71e7fb4053c99a37c1330a762ee929b51a | /code/cleaning_raw_data.R | 71c81aae098ce56a3c8178b8b519ada3b73bc642 | [] | no_license | seanhart21/farmR | 5bf189f3c4ae5d18974807a1ffebc39ef3f79ec6 | 7eb44dbdeb8194afe34a520a0cf16f0ebfb4b0a1 | refs/heads/master | 2023-04-03T12:38:00.375495 | 2021-04-12T17:00:58 | 2021-04-12T17:00:58 | 357,263,661 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 898 | r | cleaning_raw_data.R | ###import and clean data
library(tidyverse)
# Raw observation log; the first column holds dates as Excel serial numbers
df <- readxl::read_xlsx("raw_data/Hart Farm Ag Log Excerpt.xlsx")
head(df)
## Get Dates from excel format.
df[,1]
tail(df[,1])
dates <- as.data.frame(df[,1])
dates <- as.numeric(dates[1:nrow(dates),])
library(lubridate)
# Excel serial dates count days since 1899-12-30
as.Date('1899-12-30') + days(dates)
df$date <- as.Date('1899-12-30') + days(dates)
head(df)
## just get squirrels
## (row 3 of the sheet carries the species names, so search it for the column)
squirrel.id <- which(df[3,1:56]=="Squirrel") #13
sq <- df %>% select(date, n = squirrel.id)
## drop the header rows sitting above the actual observations
sq <- sq[4:nrow(sq),]
head(sq)
## NA (blank in the sheet) is treated as zero sightings
sq$n <- ifelse(is.na(sq$n), 0, as.numeric(sq$n))
head(sq)
ggplot(sq, aes(x=date,y=n)) + geom_point()+
theme_classic()
## just get cardinals (same steps as for squirrels)
card.id <- which(df[3,1:56]=="Cardinal") #13
cd <- df %>% select(date, n = card.id)
cd <- cd[4:nrow(cd),]
head(cd)
cd$n <- ifelse(is.na(cd$n), 0, as.numeric(cd$n))
head(cd)
ggplot(cd, aes(x=date,y=n)) + geom_point()+
theme_classic()
|
98dbf959201928d2d7f2c49d5070822851e2f25e | a35dc9d7130913d23542d23ce11f549f7268b93b | /plot2.R | 015deffdb42a038699de056b937648efd447d046 | [] | no_license | pratta3/RepData_PeerAssessment1 | 557bace29d5cc37f410327b077ea236086711534 | 06cde98165e6c2181f630e9e30817e12ceaaf6d8 | refs/heads/master | 2020-12-30T09:59:02.714794 | 2017-08-04T04:12:19 | 2017-08-04T04:12:19 | 99,251,462 | 0 | 0 | null | 2017-08-03T16:07:29 | 2017-08-03T16:07:29 | null | UTF-8 | R | false | false | 802 | r | plot2.R |
# Plot2 is a time-series plot showing the average number of steps at different
# time intervals throughout the day.
# pattern.steps is created by script_activity.R (sourced below); assumed to
# have a POSIXct `time` column and a `mean` column - confirm there.
source("script_activity.R")
library(ggplot2)
# Shared theme: large centred bold title, enlarged axis/strip text
theme <- theme_bw() +
theme(plot.title = element_text(size = 24, hjust = .5,
face = "bold", margin = margin(0,0,25,0)),
axis.title = element_text(size = 18),
axis.text = element_text(size = 10),
strip.text = element_text(size = 12))
# Line plot of mean steps per interval, x axis formatted as HH:MM
plot2 <- ggplot(pattern.steps, aes(time, mean))
plot2 <- plot2 + geom_line() +
scale_x_datetime(date_labels = "%H:%M") +
labs(title = "Average number of steps taken throughout the day",
x = "Time of day",
y = "Number of steps") +
theme
plot2
7e8caf3270c7f362f2e97c339d8b67947e9ead3f | d4104e2df5fe1d38f92ffad46b1496f0e95d91c3 | /plot4.R | dedaa49edf2ccbaba194049a40ffc76348120286 | [] | no_license | lduan18/ExData_Plotting1 | 4986c48934885ca59fe7944148c5e9c642bc5464 | b0b9ff85cf713f21462135aa2c318bb66165c233 | refs/heads/master | 2021-05-28T16:35:07.069999 | 2015-04-12T01:59:25 | 2015-04-12T01:59:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,041 | r | plot4.R | file <- read.table("./household_power_consumption.txt", stringsAsFactors=FALSE, header=FALSE, sep=";", nrow=1000000)
# NOTE(review): file2 (the rows beyond the first 1,000,000 read into `file`
# above) appears unused below - confirm and consider removing this read.
file2 <- read.table("./household_power_consumption.txt", stringsAsFactors=FALSE, header=FALSE, sep=";", skip=1000000)
# Keep only the two days of interest (1-2 Feb 2007)
file3 <- file[file$V1 %in% c("1/2/2007", "2/2/2007"),]
# Combine date (V1) and time (V2) into a POSIXlt timestamp column
file3$V1V2 <- paste(file3$V1, file3$V2)
file3$V1V2 <- strptime(file3$V1V2, "%d/%m/%Y %H:%M:%S")
# V3 = global active power; coerce from character ("?" becomes NA)
file3$V3 <- as.numeric(file3$V3)
# 2x2 panel of plots on a white background
par(mfrow=c(2,2))
par(bg = "white")
plot(file3$V1V2,file3$V3, type="l", xlab="", ylab="Global Active Power (kilowatts)")
plot(file3$V1V2, file3$V5, type="l", ylab="Voltage", xlab="datetime")
# Sub-metering panel: V7 (black), V8 (red), V9 (blue)
plot(file3$V1V2, file3$V7, type="l", xlab="", ylab="Energy sub metering")
lines(file3$V1V2, file3$V8, col="red")
lines(file3$V1V2, file3$V9, col="blue")
legend('topright', c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col=c("black", "red", "blue"),lty=c(1,1,1), bty="n")
plot(file3$V1V2, file3$V4, type="l", xlab="datetime", ylab="Global_reactive_power")
# Copy the screen device to a PNG, then close it
dev.copy(png, file="plot4.png", width=480, height=480)
dev.off()
29b4a2b1adb002ba0fbd8b4a66c2f8308cbca1cb | b1181c0b0b8f88f09e61b5153799aa0fb7137c69 | /run_analysis.R | dd07ebbcffb7f6a698d37162a4385f2108d02d11 | [] | no_license | briantxbai/Getting-and-cleaning-data-course-project | c6dc7fe10028ac8cbc45f588cd78c557afe86d2c | 138309954ed8ee922be5d39cfe5b79b07d9b42fc | refs/heads/master | 2021-01-01T05:15:59.022666 | 2016-05-07T08:41:45 | 2016-05-07T08:41:45 | 57,648,386 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,771 | r | run_analysis.R | ##This R script includes R codes for JHU Getting
##and cleaning data course project
library(dplyr)
##Download and load data
filename <- "getdata_dataset.zip"
if(!file.exists(filename)){
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, filename)
}
if(!file.exists("UCI HAR Dataset")){
unzip(filename)
}
## X_* holds the feature measurements, y_* the activity codes,
## subject_* the participant ids
trainData <- read.table(".//UCI HAR Dataset//train//X_train.txt")
trainLabels <- read.table(".//UCI HAR Dataset//train//y_train.txt")
trainSubjects <- read.table(".//UCI HAR Dataset//train//subject_train.txt")
testData <- read.table(".//UCI HAR Dataset//test//X_test.txt")
testLabels <- read.table(".//UCI HAR Dataset//test//y_test.txt")
testSubjects <- read.table(".//UCI HAR Dataset//test//subject_test.txt")
features <- read.table(".//UCI HAR Dataset//features.txt")
activityLabels <- read.table(".//UCI HAR Dataset//activity_labels.txt")
##Extract the feature of interest (mean & standard deviation)
## Only features literally named ...mean() or ...std() are kept
feature.index <- grep("mean\\(\\)|std\\(\\)", features[,2])
feature.name <- features[feature.index,2]
## Strip "-" and "()" so the names become clean column names
feature.name <- gsub("-","",feature.name)
feature.name <- gsub('[()]','',feature.name)
trainData <- trainData[,feature.index]
testData <- testData[,feature.index]
##Merge the data, subjects and activities, and add descriptive labels
##for each variable
trainData <- cbind(trainSubjects,trainLabels,trainData)
testData <- cbind(testSubjects,testLabels,testData)
MergedData <- rbind(trainData,testData)
colnames(MergedData) <- c("Subjects","Activities",feature.name)
##Uses descriptive activity names to name the activities in the data set
MergedData$Activities <- factor(MergedData$Activities, levels = activityLabels[,1],labels = tolower(activityLabels[,2]))
##Turn the subjects and activities columns into factor
MergedData$Subjects <- as.factor(MergedData$Subjects)
##Appropriately labels the data set with descriptive variable names
names(MergedData)<-gsub("^t", "Time", names(MergedData))
names(MergedData)<-gsub("^f", "Frequency", names(MergedData))
names(MergedData)<-gsub("Acc", "Accelerometer", names(MergedData))
names(MergedData)<-gsub("Gyro", "Gyroscope", names(MergedData))
names(MergedData)<-gsub("Mag", "Magnitude", names(MergedData))
names(MergedData)<-gsub("BodyBody", "Body", names(MergedData))
names(MergedData)<-gsub("mean", "Mean", names(MergedData))
names(MergedData)<-gsub("std", "Std", names(MergedData))
##Create a second, independent tidy data set with the average
##of each variable for each activity and each subject.
MergedData.aggr <- aggregate(. ~ Subjects - Activities, data = MergedData, mean)
MergedData.mean <- tbl_df(arrange(MergedData.aggr,Subjects,Activities))
write.table(MergedData.mean, "tidy.txt", row.names = FALSE, quote = FALSE)
|
64d805742fd6e67b132d86d62f384bca775531b8 | e807c83cd7e9c8b22607a62f37bef004c93afe36 | /FitBitR_Getdata.R | 77acbb57385810b153b45ace1c1a6d372b725f8f | [] | no_license | Chmavo/Regression-with-FitBit | fd2adc7a40320c54bbcf44e48661e7ade9ba982d | e3ff59dbcabf82ffbd6421ff2c06896a0a75fd2f | refs/heads/master | 2021-05-01T07:38:05.905933 | 2018-02-11T19:53:15 | 2018-02-11T19:53:15 | 121,160,165 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 609 | r | FitBitR_Getdata.R | library(fitbitr)
library(plyr)
#get new API token
token <- get_fitbit_token()
#make call to fitbit API to download data and parse response to JSON
#(daily heart-rate records from 2016-01-01 up to today)
hr_resp <-
fitbit_GET("1/user/-/activities/heart/date/2016-01-01/today.json",
token = token)
hr_ret <- fitbit_parse(hr_resp)
#flatten each day's nested record into a single named vector
hr_ret.i <-
lapply(hr_ret$`activities-heart`, function(x) {
unlist(x)
})
#bind the per-day rows into one data frame; rbind.fill pads days that
#are missing some fields with NA
hr_data <- rbind.fill(lapply(hr_ret.i,
function(x)
do.call("data.frame", as.list(x))))
#resting heart rate arrives as a factor; convert via character to numeric
hr_data$value.restingHeartRate <-
as.numeric(as.character(hr_data$value.restingHeartRate))
ed1084bfe3044ef6f492ff077e365bd6c610c69e | 1e33994decb847455101d5ca3ebef647ee2d11b9 | /plot1.R | 8a1a8dc333b393ec455e17b7585aa32c95782351 | [] | no_license | byron-data/Exploratory-Data-Analysis-Assignment-1 | 13c6b8bcda80ccf3c2afa2e9a00abdd71e45ac29 | dd4c7447c40d540b20218bb1939d4e5da2acfeb6 | refs/heads/master | 2021-06-13T15:09:55.254016 | 2017-04-15T06:53:57 | 2017-04-15T06:53:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 921 | r | plot1.R | # Download the data
download.file(url="https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
destfile="household_power_consumption.zip")
# Unpack the zip file
unzip(zipfile="household_power_consumption.zip")
library(sqldf)
# Read all household power consumption data only for 1/2/2007 and 2/2/2007
# (the SQL filter avoids loading the full ~2M-row file into memory)
hpc <- read.csv.sql("household_power_consumption.txt", sql="select * from file where Date = '1/2/2007' or Date = '2/2/2007'", header=TRUE, sep=";")
###
# Graph 1.
###
# Open PNG graphics device with width 480 and height 480
png('plot1.png', width=480, height=480)
# Set up a histogram of Global_active_power with red bars, an x label (y label default) and a main title
hist(hpc$Global_active_power, col="red", xlab="Global Active Power (kilowatts)", main="Global Active Power")
#dev.copy(png, file = "plot1.png") # alternative (not used)
# Turn off graphics device
dev.off()
|
3a4cd486e73c786e0288b990383f974a87ea913b | 0a399ef04f0e06734640cd259f7d3d6d8cd4362f | /Riesgos competitivos/competing risks.R | 432566d2407742a23894020443397d82e9739804 | [] | no_license | mrainmontececinos/competing-risks | 8b4accf0ab47b09d2b2d4cf7ad6ba380f8383e58 | 15221e187fbdd1aa825dcc4ebd02ecb3a9625501 | refs/heads/main | 2023-01-13T01:14:20.939494 | 2020-11-15T03:49:42 | 2020-11-15T03:49:42 | 310,975,475 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,521 | r | competing risks.R | library(riskRegression)
library(ggfortify)
library(survminer)
library(plotly)
library(gridExtra)
# Melanoma data shipped with riskRegression; add a random patient id
data(Melanoma)
set.seed(234)
Melanoma$id=sample(1:205)
str(Melanoma)
table(Melanoma$event)
# Relabel event and sex (display labels kept in Spanish on purpose)
Melanoma$event=factor(Melanoma$event, levels = c("censored"
,"death.malignant.melanoma",
"death.other.causes"),
labels = c("censurado","murio",
"otra causa"))
Melanoma$sex=factor(Melanoma$sex, levels = c("Female","Male"),
labels = c("Mujer","Hombre"))
# Interactive follow-up plot: one horizontal segment per patient with a
# point marking the event type at the end of follow-up
ggplotly(
Melanoma %>%
mutate(
text = paste("id = ", id,
"<br>", "time= ", time,
"<br>", "status = ", status,
"<br>", " age= ", round(age, 2))
) %>%
ggplot(aes(x = id, y = time, text = text)) +
geom_linerange(aes(ymin = 0, ymax = time)) +
geom_point(aes(shape = status, color = status), stroke = 1, cex = 2) +
scale_shape_manual(values = c(1, 3, 4)) +
labs(y = "Tiempo (días)", x = "id") + coord_flip() + theme_classic(),
tooltip = "text"
)
###################################################
### 1.1
### Non-parametric comparison of the cumulative incidence functions (CIF)
library(cmprsk)
cif<-cuminc(ftime = Melanoma$time, fstatus = Melanoma$status, group=Melanoma$sex)
plot(cif,col=1:4,xlab="Days")
cif
#########################################
#### 1.2 Cause-specific hazard regression
### Cox regression treating only status == 1 as the event
csh<-coxph(Surv(time,status==1)~sex+invasion,data=Melanoma)
summary(csh)
library("riskRegression")
library("prodlim")
# Cause-specific Cox models for all causes at once
CSH<-CSC(Hist(time,status)~sex+age+invasion,data=Melanoma)
CSH
library("pec")
# Predicted event probabilities for one new covariate profile at three times
pec::predictEventProb(CSH,cause=1,newdata=data.frame(age=40,invasion=factor("level.2",levels=levels(Melanoma$invasion)),
sex=factor("Hombre",levels=levels(Melanoma$sex))),
time=c(1000,2000,3000))
# Fine-Gray subdistribution hazard model
SH <- FGR(Hist(time,status)~sex+age+invasion,data=Melanoma)
SH
# Design matrix without the intercept, as required by crr()
cov<-model.matrix(~sex+age+invasion,data=Melanoma)[,-1]
cov
crr.model<-crr(Melanoma$time,Melanoma$status,cov1=cov)
crr.model
### 1.4 Model prediction
### Prediction for three new covariate profiles
newdata<-data.frame(sex=factor(c("Hombre","Hombre","Mujer"),
levels=levels(Melanoma$sex)),age=c(52,32,59),
invasion=factor(c("level.2","level.1","level.2"),
levels=levels(Melanoma$invasion)))
newdata
dummy.new<-model.matrix(~sex+age+invasion,data=newdata)[,-1]
dummy.new
pred<-predict(crr.model,dummy.new)
plot(pred,lty=1:3,col=1:3,xlab="Días",ylab="Cumulative incidence function")
legend("topleft",c("Hombre,age=52,invasion2","Hombre,age=32,
invasion1","Female,age=59,invasion2"),lty=1:3,col=1:3)
#### Survival analysis in the presence of competing risks
reg<-riskRegression(Hist(time, status) ~ sex + age +invasion, data = Melanoma, cause = 1,link="prop")
reg
plot(reg,newdata=newdata)
### 1.5 Model diagnostics
checkdata<-data.frame(sex=factor(c("Hombre","Hombre","Hombre"),
levels=levels(Melanoma$sex)),
age=c(46,46,46),invasion=factor(c("level.0","level.1","level.2"),
levels=levels(Melanoma$invasion)))
checkdata
plot(reg,newdata=checkdata,lty=1:3,col=1:3)
text(2000,1,"Covariates sex='Hombre'; age=52")
legend("topleft",c("invasion.level0","invasion.level1","invasion.level2"),lty=1:3,col=1:3)
# Time-varying effect of the first design-matrix column (the sex dummy)
crr.time<-crr(Melanoma$time,Melanoma$status,cov1=cov,
cov2=cov[,1],tf=function(t) t)
summary(crr.time)
# Competing-risks regression stratified by invasion level
reg.time<-riskRegression(Hist(time, status) ~ sex + age +
strata(invasion), data = Melanoma, cause = 1,link="prop")
plotEffects(reg.time,formula=~invasion)
# Schoenfeld-type residuals of the Fine-Gray model, one panel per coefficient
par(mfrow=c(2,2))
for(j in 1:ncol(crr.model$res)) {
scatter.smooth(crr.model$uft, crr.model$res[,j],
main =names(crr.model$coef)[j],
xlab = "Failure time",
ylab ="Schoenfeld residuals")
}
# Time-varying effect of the third design-matrix column: linear in t,
# then linear + quadratic
crr.time2<-crr(Melanoma$time,Melanoma$status,cov1=cov,cov2=cov[,3],tf=function(t) t)
crr.time2
crr.time3<-crr(Melanoma$time,Melanoma$status,cov1=cov,cov2=cbind(cov[,3], cov[,3]),tf=function(t) cbind(t,t^2),)
crr.time3
|
8e7d3b86d3d3f51bb18eb03401af6007907db331 | 46e703f23b8e5db1281d92007592419554061331 | /plot3.R | cceeda49bacf931918d3fce99c08e4331729175e | [] | no_license | stradaconsulting/ExData_Plotting1 | 7f2117ee2438e8f79d423370f460db09e48464a1 | ca143522e6d2442a46f615cef73d36936e6417d4 | refs/heads/master | 2020-12-31T03:25:06.683266 | 2015-07-11T02:41:55 | 2015-07-11T02:41:55 | 38,903,537 | 0 | 0 | null | 2015-07-10T21:49:25 | 2015-07-10T21:49:25 | null | UTF-8 | R | false | false | 2,569 | r | plot3.R | ## R-Programming Coursera
## Assignment 1
## Plot3
## Prepared by: Santiago Oleas - soleas@gmail.com
## Set working directory to local github folder
## NOTE(review): a hard-coded absolute setwd() makes this machine-specific;
## consider removing it and running from the repository root instead.
setwd("/Users/santiago/git/coursera/Exploratory Data Analysis/Assignment 1/ExData_Plotting1")
##Get File
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileURL, destfile = "powerConsumption.zip", method = "curl")
##Unzip file
unzip("powerConsumption.zip",overwrite=TRUE)
##Read File ("?" marks missing values in this data set)
inputData <- read.table(file="./household_power_consumption.txt", sep=";", header=TRUE, na.strings =c("?"))
## Fix date and time columns using strptime
inputData$Time <- as.POSIXct(paste(inputData$Date, inputData$Time), format="%d/%m/%Y %H:%M:%S")
inputData$Date <- strptime(inputData$Date, "%d/%m/%Y")
## We only want a subset of days 20070201 to 20070202
plot3Data <- inputData[inputData$Date>="2007-02-01" & inputData$Date<="2007-02-02",]
## We will get the minimum value of each sub-metering variable and put
## into a variable
minRange <- c(min(plot3Data$Sub_metering_1),
min(plot3Data$Sub_metering_2),
min(plot3Data$Sub_metering_3))
## We will get the maximum value of each sub-metering variable and put
## into a variable
maxRange <- c(max(plot3Data$Sub_metering_1),
max(plot3Data$Sub_metering_2),
max(plot3Data$Sub_metering_3))
##Generate plot3.png
png("plot3.png", width=480, height=480, units="px") ## Save to file 480x480 pixels
## This is line plot for each SubMetering value (1,2,3) vs Date/Time
## Start with blank: type = "n"
## There is NO x-axis label
## y-label is to be "Energy sub metering"
## There is NO main title
## We must set the limit of the y-range
## It will be from the smallest of our minRange to biggest of our maxRange
plot( plot3Data$Time, plot3Data$Sub_metering_1
, type="n"
, xlab = ""
, ylab = "Energy sub metering"
, ylim=(c(min(minRange),max(maxRange)))
)
#draw Sub_metering_1 line. Leave as default colour black
lines(plot3Data$Time,plot3Data$Sub_metering_1)
#draw Sub_metering_2 line. Make red
lines(plot3Data$Time,plot3Data$Sub_metering_2, col="red")
#draw Sub_metering_3 line. Make blue
lines(plot3Data$Time,plot3Data$Sub_metering_3, col="blue")
#Legend
#Place top-right
#We want 3 lines (lty)
#Colours of legend
#Legend labels
# Legend in the top-right corner, one solid line per series, with colours
# matched to the drawing order above: 1 = black, 2 = red, 3 = blue.
# (Fixes a bug: the original passed col=c("black","blue","red"), which
# swapped the colours shown for Sub_metering_2 and Sub_metering_3.)
legend("topright",
       lty=c(1,1,1),
       col=c("black","red","blue"),
       legend=c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"))
dev.off() ##IMPORTANT always when sending to file
## END
7b5114a05aaf05e1aaacde7547d09c61def26163 | 7ba42ea09417547219343e5532a1f7954bdf10b2 | /man/nlp_xlm_roberta_sentence_embeddings_pretrained.Rd | e8b2245f3cb7ae4e9c7d0c036e06e1a32ceda54c | [
"Apache-2.0"
] | permissive | r-spark/sparknlp | 622822b53e2b5eb43508852e39a911a43efa443f | 4c2ad871cc7fec46f8574f9361c78b4bed39c924 | refs/heads/master | 2023-03-16T05:35:41.244593 | 2022-10-06T13:42:00 | 2022-10-06T13:42:00 | 212,847,046 | 32 | 7 | NOASSERTION | 2023-03-13T19:33:03 | 2019-10-04T15:27:28 | R | UTF-8 | R | false | true | 1,874 | rd | nlp_xlm_roberta_sentence_embeddings_pretrained.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xlm_roberta_sentence_embeddings.R
\name{nlp_xlm_roberta_sentence_embeddings_pretrained}
\alias{nlp_xlm_roberta_sentence_embeddings_pretrained}
\title{Load a pretrained Spark NLP XlmRoBertaSentenceEmbeddings model}
\usage{
nlp_xlm_roberta_sentence_embeddings_pretrained(
sc,
input_cols,
output_col,
case_sensitive = NULL,
batch_size = NULL,
dimension = NULL,
max_sentence_length = NULL,
name = NULL,
lang = NULL,
remote_loc = NULL
)
}
\arguments{
\item{sc}{A Spark connection}
\item{input_cols}{Input columns. String array.}
\item{output_col}{Output column. String.}
\item{case_sensitive}{whether to lowercase tokens or not}
\item{batch_size}{batch size}
\item{dimension}{defines the output layer of BERT when calculating embeddings}
\item{max_sentence_length}{max sentence length to process}
\item{name}{the name of the model to load. If NULL will use the default value}
\item{lang}{the language of the model to be loaded. If NULL will use the default value}
\item{remote_loc}{the remote location of the model. If NULL will use the default value}
}
\value{
The Spark NLP model with the pretrained model loaded
}
\description{
Create a pretrained Spark NLP \code{XlmRoBertaSentenceEmbeddings} model.
See \url{https://nlp.johnsnowlabs.com/docs/en/annotators#xlmrobertasentenceembeddings}
}
\details{
Sentence-level embeddings using XLM-RoBERTa. The XLM-RoBERTa model was proposed in
Unsupervised Cross-lingual Representation Learning at Scale by Alexis Conneau,
Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán,
Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov. It is based on
Facebook's RoBERTa model released in 2019. It is a large multi-lingual language model,
trained on 2.5TB of filtered CommonCrawl data.
}
|
341cf71841db4280d62286cb968274f7f99b5297 | 0ebf8d4a144f4bc53b113992068c88fa78e726c0 | /man/gm.sd.Rd | b1e061b88c02ed05a9b1e1dc10bcd336bd4121a2 | [] | no_license | cran/evoper | de054e9df5b17afbb57df4fafb1d33347c8fdbb6 | c81202d787a6e06eab3416a563e3967602ae653a | refs/heads/master | 2021-01-20T19:14:08.061212 | 2018-08-30T22:20:06 | 2018-08-30T22:20:06 | 61,308,786 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 401 | rd | gm.sd.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper-functions.R
\name{gm.sd}
\alias{gm.sd}
\title{gm.sd}
\usage{
gm.sd(x, mu = NULL)
}
\arguments{
\item{x}{data}
\item{mu}{The geometric mean. If not provided it is calculated.}
}
\value{
geometric standard deviation for data
}
\description{
Simple implementation for geometric standard deviation
}
|
400f9770d6410e09307aa514c33f97e48854727b | 2d9fb03feb8626c67ba5d3f1a0815710b621c5f6 | /R/processing_time_log.R | f43865ee677288a693ad3787627b4e4c7da3e453 | [] | no_license | bbrewington/edeaR | 4c8916bad4c54521764574770ae941983363dc0a | 02b31d133b5cec68caa6e0c5fa446a6a6275d462 | refs/heads/master | 2021-01-19T18:32:49.442081 | 2016-08-27T17:31:36 | 2016-08-27T17:31:36 | 66,726,375 | 0 | 0 | null | 2016-08-27T17:17:51 | 2016-08-27T17:17:51 | null | UTF-8 | R | false | false | 442 | r | processing_time_log.R |
#' Summary statistics of processing time over an event log.
#'
#' Validates the event log, computes the per-case processing times and
#' returns a single-row matrix holding min, q1, median, mean, q3, max,
#' standard deviation, IQR and the total processing time.
processing_time_log <- function(eventlog,
                                units = "days") {
  # Abort early when the input is not a valid event log.
  stop_eventlog(eventlog)

  # One processing-time value per case.
  per_case <- processing_time_case(eventlog, units = units)
  durations <- per_case$processing_time

  # Six-number summary (min, q1, median, mean, q3, max), then append the
  # spread and total measures. The IQR is taken from the summary() values
  # (element 5 minus element 2), exactly as before.
  stats <- summary(durations)
  stats <- c(stats,
             St.Dev = sd(durations),
             IQR = stats[5] - stats[2],
             tot = sum(durations))
  names(stats) <- c("min", "q1", "median", "mean", "q3", "max",
                    "st_dev", "iqr", "tot")

  # Reshape into a one-row matrix without row names.
  out <- t(as.data.frame(stats))
  row.names(out) <- NULL
  out
}
|
8f86a3d7f2da114cc6d61294a7ffe3dee746efaf | c29c7b4b9e3d1e1e7b1ea8d888bbb38719ceec89 | /GSEA_TF_plotter.R | 9a95719dd2a3d18c44d038c4a77bbebb4117b1fe | [] | no_license | bweasels/SequencingUtilities | 1732c53b9a1b602fe50f387af44d4de8108822e5 | 45255e18b56b620e00803c725dd56d18bae1051b | refs/heads/master | 2021-07-06T16:55:05.734785 | 2020-10-15T17:43:36 | 2020-10-15T17:43:36 | 194,119,328 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,723 | r | GSEA_TF_plotter.R | #Plots the transcription factor binding sites and tells if they are significant or not
#requires
#library(TFEA.ChIP)
#Plots the transcription-factor enrichment for one DESeq2-style contrast:
#writes an interactive enrichment-score plot (HTML) and a bar chart (PDF)
#of how often each TF appears in the enrichment table.
#
# Args:
#   results   - S4 results object; @elementMetadata supplies the contrast
#               description ("Condition ..."), @listData the result columns.
#   outputDir - output directory (including trailing separator).
#   timepoint - label embedded in the output file names (e.g. "24").
#
# Side effects only: files '<timepoint>hr.<contrast>.TFPlots.html' and
# '<timepoint>hr.<contrast>TFCounts.pdf' are written into outputDir.
GSEA_TF_Plotter <- function(results, outputDir, timepoint){
  #Recover the contrast name for use in the output file names
  fileName <- results@elementMetadata
  fileName <- grep('Condition', fileName$description, value = TRUE)
  fileName <- gsub('.*(Condition.*)', '\\1', fileName[1])
  fileName <- gsub(' ', '.', fileName)
  results <- data.frame(results@listData, Genes = rownames(results))
  #Skip contrasts with too few genes for a meaningful GSEA
  if (nrow(results) > 5) {
    #Prep data, run the GSEA keeping the running enrichment scores,
    #and save the interactive TF plot
    data <- preprocessInputData(results)
    GSEA.results <- GSEA_run(data$Genes, data$log2FoldChange, get.RES = TRUE)
    p <- plot_ES(GSEA.results, data$log2FoldChange)
    htmlwidgets::saveWidget(p, file = paste0(outputDir, timepoint, 'hr.', fileName, '.TFPlots.html'))
    #Significant TFs (pval < 0.05)
    # NOTE(review): this table is computed but never used below; the counts
    # further down include ALL enrichment rows, not only the significant
    # ones - confirm whether they should be restricted to enrichTable.
    enrichTable <- GSEA.results[['Enrichment.table']]
    enrichTable <- enrichTable[enrichTable$pval.ES < .05, ]
    #Count the enrichment entries per unique TF.
    #(seq_along/vapply replace the original 1:length() loop, which breaks
    # on zero-length input.)
    tf.column <- GSEA.results[['Enrichment.table']]$TF
    Tfactors <- unique(tf.column)
    counts <- vapply(seq_along(Tfactors),
                     function(i) sum(tf.column %in% Tfactors[i]),
                     integer(1))
    names(counts) <- Tfactors
    #Keep only TFs seen more than 5 times, ordered by frequency
    counts <- sort(counts, decreasing = TRUE)
    counts <- counts[counts > 5]
    counts <- data.frame(TF = factor(names(counts), levels = names(counts)), count = counts)
    #Bar chart of the per-TF counts; on.exit() guarantees the PDF device
    #is closed even if plotting fails
    pdf(paste0(outputDir, timepoint, 'hr.', fileName, 'TFCounts.pdf'), width = 20)
    on.exit(dev.off(), add = TRUE)
    gplot <- ggplot(counts, aes(x = TF, y = count)) + geom_bar(stat = 'identity')
    print(gplot)
  }
}
8612997dae78b7b36639900aaae1c40b8447274e | cbb8f3ba14092d175a1903d8b6746d912f868379 | /src/inst/tests/not_ready/test.get_kappa.R | 9cbdbadfe02d5fce65115967bbbf4228c0cf32b4 | [] | no_license | howl-anderson/sdmutility | 6d02801e5dd156861e81b616e4efd63b8f26ee99 | bb93097677a63a8292336c52a8236fdf0e78afa0 | refs/heads/master | 2021-01-19T03:31:59.720547 | 2015-09-04T06:52:11 | 2015-09-04T06:52:11 | 41,901,949 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 398 | r | test.get_kappa.R | test_that("get kappa", {
library("raster")
# Two 3x3 binary rasters standing in for current and future distributions
current.layer <- raster(nrow=3, ncol=3, xmn=0, xmx=3, ymn=0, ymx=3, crs=NA)
current.layer[] <- c(1,1,0,1,1,0,1,0,0)
future.layer <- raster(nrow=3, ncol=3, xmn=0, xmx=3, ymn=0, ymx=3, crs=NA)
future.layer[] <- c(0,1,1,1,1,1,0,1,0)
# Confusion matrix between the two layers, then the kappa statistic from it
cm <- confusionMatrix(current.layer, future.layer)
kappa.score <- getKappa(cm)
# NOTE(review): this test has no expect_*() assertion, so it only checks
# that the calls run without error; add an expectation on kappa.score.
})
|
d242883ac7eb715283a12d733b06dd686c054ab4 | 30bf67317024c30c60a6699e99e465537a3fed73 | /lp/lp0.R | 4342fa42a1e2bdf20c16eef7be0959de9545427a | [] | no_license | dupadhyaya/rorlp | 4b3c179894a77b27ce8113ed73a224cbd0620240 | eeaab3d2bb110fc342b1a8d29eef979c7d7a9490 | refs/heads/master | 2020-03-22T19:25:57.833405 | 2018-08-06T09:36:37 | 2018-08-06T09:36:37 | 140,527,136 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 514 | r | lp0.R | # Transport Problem in R
#
# NOTE(review): the calls below appear to be argument signatures copied from
# the lpSolve help pages (lp, lp.assign, lp.object, lp.transport). They
# reference bare formal-argument names (objective.in, const.mat, nc, nr, ...)
# that are never defined here, so sourcing this file as-is will fail.
# Treat it as reference notes rather than runnable code.
lp(direction = "min", objective.in, const.mat, const.dir, const.rhs,
transpose.constraints = TRUE, int.vec, presolve=0, compute.sens=0,
binary.vec, all.int=FALSE, all.bin=FALSE, scale = 196, dense.const,
num.bin.solns=1, use.rw=FALSE)
lp.assign (cost.mat, direction = "min", presolve = 0, compute.sens = 0)
lp.object
lp.transport(cost.mat, direction="min", row.signs, row.rhs, col.signs,
col.rhs, presolve=0, compute.sens=0, integers = 1:(nc*nr) )
?make.q8
|
add10c08341525f078c3f534bb02125c74ea98ee | dd5f8768d4ee1df9d161c35d2f8c09bcc5b71319 | /plot3.R | 941f0fcf50f01c70c04082d0db99bafa7f4b1cdc | [] | no_license | M16B/ExData_Plotting1 | 64dbc666824f02a88ed86e35087c165f4669ec3b | fc7417fa7dbf2ceb18f9abeeaed8d5f8f51b8058 | refs/heads/master | 2021-01-17T09:06:22.706120 | 2015-07-13T00:33:20 | 2015-07-13T00:33:20 | 38,981,551 | 0 | 0 | null | 2015-07-12T23:09:51 | 2015-07-12T23:09:51 | null | UTF-8 | R | false | false | 1,023 | r | plot3.R | #Read in Data
#Read in the full household power consumption data set.
#"?" marks missing observations in this file, so map it to NA up front.
data <- read.table("./data/household_power_consumption.txt", header = T,
                   sep = ";", stringsAsFactors = F, dec = ".",
                   na.strings = "?")
#Keep only the two days of interest (1-2 Feb 2007)
subData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
#Get x axis values: combine Date and Time into POSIXlt timestamps.
#(Fixes a bug: the original referenced the undefined name `subSetData`;
# the subset created above is called `subData`.)
time <- strptime(paste(subData$Date, subData$Time, sep = " "),
                 "%d/%m/%Y %H:%M:%S")
#Get y axis values (unused by plot 3 itself; kept for parity with the
#other plotting scripts in this assignment)
globalActivePower <- as.numeric(subData$Global_active_power)
#The three sub-metering series drawn as lines below
Sub_Metering_1 <- as.numeric(subData$Sub_metering_1)
Sub_Metering_2 <- as.numeric(subData$Sub_metering_2)
Sub_Metering_3 <- as.numeric(subData$Sub_metering_3)
#Send output to a 480x480 PNG file
png("plot3.png", width = 480, height = 480)
#Series 1 in black, then overlay series 2 (red) and 3 (blue)
plot(time, Sub_Metering_1, type = "l", ylab = "Energy Submetering", xlab = "")
lines(time, Sub_Metering_2, type = "l", col = "red")
lines(time, Sub_Metering_3, type = "l", col = "blue")
#Legend with colours matching the drawing order above
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, lwd = 2.5, col = c("black", "red", "blue"))
dev.off()
f94e274a6f248a6d3660c292690a48563e90a2c4 | 9c89a2b82537f3d905579f2db5de51aa6748e3f8 | /Average_secretion.R | 83d13f5949865ab3dada56d1d483b0b386b1a87e | [] | no_license | kleppem/data_R | 0ccda4ea1510bfaf5ebf67f507ba52244ccf04f7 | 47b34c92ae571b84f413cb345d4b8e67f51f7c96 | refs/heads/master | 2020-05-30T10:15:25.542525 | 2014-10-25T00:53:39 | 2014-10-25T00:53:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 324 | r | Average_secretion.R | #read table
#Read the raw secretion table (one column per sample)
dp<-read.table("LINposGFPpos", check.names=TRUE, header=TRUE)
#convert into data frame
dfdp<-data.frame(dp)
#convert data frame into matrix
Mdp<-as.matrix(dfdp)
#Convert non-positive values (<= 0) to NA so they are excluded from the means.
#(Fixes a bug: the original operated on `Mfl.transpose`, a name never defined
# in this script; `Mdp` is the matrix built above. If a transposed
# fluorescence matrix was intended instead, restore it from the source data.)
Mdp[Mdp <= 0] <- NA
#Average of each column excluding NA
colMeans(Mdp, na.rm = TRUE)
de3c08e68ec0f89e9f8a933d518049b86d5809d9 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/santaR/examples/get_grouping.Rd.R | 93e1f879c17d96ec544cb049a3d7adf1139da78a | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 653 | r | get_grouping.Rd.R | library(santaR)
### Name: get_grouping
### Title: Generate a matrix of group membership for all individuals
### Aliases: get_grouping
### ** Examples
## 3 subjects in 2 groups
ind <- c('ind_1','ind_1','ind_1','ind_2','ind_2','ind_3')
group <- c('g1','g1','g1','g2','g2','g1')
get_grouping(ind, group)
# ind group
# 1 ind_1 g1
# 2 ind_2 g2
# 3 ind_3 g1
## 8 subjects in 2 groups (metadata shipped with santaR)
ind <- acuteInflammation$meta$ind
group <- acuteInflammation$meta$group
get_grouping(ind, group)
# ind group
# 1 ind_1 Group1
# 2 ind_2 Group2
# 3 ind_3 Group1
# 4 ind_4 Group2
# 5 ind_5 Group1
# 6 ind_6 Group2
# 7 ind_7 Group1
# 8 ind_8 Group2
|
524a2b278bde88b433f9e9ee723ab3477b37bd6b | a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3 | /B_analysts_sources_github/jmzeng1314/where_is_my_Mr_Right/ui.R | cde22c903276542124c9d3cc98b71169ba79fc28 | [] | no_license | Irbis3/crantasticScrapper | 6b6d7596344115343cfd934d3902b85fbfdd7295 | 7ec91721565ae7c9e2d0e098598ed86e29375567 | refs/heads/master | 2020-03-09T04:03:51.955742 | 2018-04-16T09:41:39 | 2018-04-16T09:41:39 | 128,578,890 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,510 | r | ui.R | library(shiny)
library(shinydashboard)
# Candidate personality tags, read from disk (UTF-8; Chinese labels)
characters=read.table('characters.txt',stringsAsFactors = F,encoding = 'UTF-8')
## TODO:
## statistics page :
# Dashboard header (title roughly: "search around, random matching")
header <- dashboardHeader(title="找来找去 随机匹配")
# Sidebar with three tabs: play, statistics, disclaimer
sider <- dashboardSidebar(
sidebarMenu(
menuItem("来玩呗", tabName="play", icon=icon("meetup")),
menuItem("看看呗", tabName="stat", icon=icon("line-chart")),
menuItem("免责", tabName="license", icon=icon("legal"))
))
# choices
# Information entry page
# inputId, "name", "age", "city", "mail", "married", ,
# buttons: "check", "my_button","her_button"
# outputId, "cloud"
# tag cloud
choices <- c("多金", "高")
# "Play" tab: personal details box, self-description box, preference box
play_items <-tabItem(tabName="play", fluidRow(
box(
textInput(inputId="name", label = "尊姓大名"),
sliderInput(inputId="age", label = "贵庚", min = 20, max = 40, value = 30),
textInput(inputId="city", label="身居何处", placeholder = "上海" ),
textInput(inputId="phone", label = "可否给个号码", placeholder="可选"),
textInput(inputId="mail", label = "邮箱", placeholder = "必选"),
checkboxInput(inputId="married", label = "单身",value = TRUE),
actionButton(inputId="check",label = "确定"),
solidHeader = TRUE,
width = 4
),
box(
tags$div(align="center",tags$h3("你是什么样的一个人")),
#plotOutput(outputId="cloud",height = 300),
#textInput(inputId="characters",label="用上面的标签描述一下你,以空格分隔",
# placeholder = "可爱 迷人"),
selectizeInput('characters', label = "用上面的标签描述一下你,以空格分隔",
choices = characters,# width = 275,
multiple=T,
options = list(placeholder = "可爱 迷人",
maxOptions = 1000)
),
actionButton(inputId="my_button",label = "这就是我"),
DT::dataTableOutput('IPinfor'),
width = 5 ,
height = 500,
background = "purple"
),
box(
width = 3,
column( width= 12,
tags$h4("你希望他/她如何"),
selectInput(inputId="choice", label = "只能选择一个哦", choices = choices),
actionButton(inputId="her_button",label = "就这个吧")
)
)
))
# Main content: tab bodies; getIP.js is a custom script loaded with the page
# (presumably for visitor IP lookup - see getIP.js)
body <- dashboardBody(
shinyjs::useShinyjs(),
tags$script(src="getIP.js"),
tabItems(
play_items,
tabItem(tabName="stat"),
tabItem(tabName="license")
)
)
ui <- dashboardPage(
header,
sider,
body,
skin = "purple"
)
|
a1a0f2be9c8b56caad9f6025a8d4fd942a8e111c | 6a28ba69be875841ddc9e71ca6af5956110efcb2 | /Fundamentals_Of_Mathematical_Statistics_by_S.c._Gupta,_V.k._Kapoor/CH5/EX5.3/EX5_3.R | 9db1f18db9abfb1d0cb1a156380ba110b1a1ffd6 | [] | permissive | FOSSEE/R_TBC_Uploads | 1ea929010b46babb1842b3efe0ed34be0deea3c0 | 8ab94daf80307aee399c246682cb79ccf6e9c282 | refs/heads/master | 2023-04-15T04:36:13.331525 | 2023-03-15T18:39:42 | 2023-03-15T18:39:42 | 212,745,783 | 0 | 3 | MIT | 2019-10-04T06:57:33 | 2019-10-04T05:57:19 | null | UTF-8 | R | false | false | 501 | r | EX5_3.R | #Page number--5.7
#Example number--5.3
#LOAD PACKAGE----->prob
s=rolldie(2)
s
X=c(1,2,3,4,5,6,7,8,9,10,11,12)
a=0
for(i in 1:12){
a[i]=nrow(subset(s,X1+X2==i))/nrow(s)
}
#Probability chart
plot(X,a,type="h",xlim=c(0,12),ylim=c(0,0.17),ylab="Prob.",xlab="Sum of two faces")
#Cummulative probability distribution
FX=c(sum(a[1]),sum(a[1:2]),sum(a[1:3]),sum(a[1:4]),sum(a[1:5]),sum(a[1:6]),sum(a[1:7]),sum(a[1:8]),sum(a[1:9]),sum(a[1:10]),sum(a[1:11]),sum(a[1:12]))
data.frame(X,FX)
|
fa5aaea64091c9e388ffca4c78b5cd779a7b01cb | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/treeplyr/examples/reorder.treedata.Rd.R | a5f3b32f8f72fb4caec463aa88193cf5579fe3b4 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 224 | r | reorder.treedata.Rd.R | library(treeplyr)
### Name: reorder
### Title: Reorder a 'treedata' object
### Aliases: reorder reorder.treedata
### ** Examples
data(anolis)
td <- make.treedata(anolis$phy, anolis$dat)
td <- reorder(td, "postorder")
|
b63d236b4c47b0cd188c95108ed8c1874a5cca3b | b59cc783d2da2f32737432c1b13cf72c5802f067 | /R/collect.results.R | 1fbfd0095ff00c47557bddda43962c48bc17b2b5 | [] | no_license | jdsimkin04/shinyinla | 9a16007b375975a3f96b6ca29a1284aa6cafb180 | e58da27a2a090557058b2a5ee63717b116216bf7 | refs/heads/master | 2023-06-05T08:34:34.423593 | 2021-06-24T00:27:04 | 2021-06-24T00:27:04 | 330,322,338 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 93,429 | r | collect.results.R | ## Export: inla.collect.results
## ! \name{inla.collect.results}
## ! \alias{inla.collect.results}
## ! \alias{collect.results}
## ! \title{Collect results from a inla-call}
## ! \description{\code{inla.collect.results} collect results from a inla-call}
## ! \usage{
## ! inla.collect.results(
## ! results.dir,
## ! control.results = inla.set.control.results.default(),
## ! debug=FALSE,
## ! only.hyperparam=FALSE,
## ! file.log = NULL,
## ! file.log2 = NULL)
## !}
## ! \arguments{
`inla.collect.results` <-
    function(
        ## ! \item{results.dir}{The directory where the results of the inla run are stored}
        results.dir,
        ## ! \item{control.results}{a list of parameters controlling the
        ## ! output of the function; see \code{?control.results}}
        control.results = inla.set.control.results.default(),
        ## ! \item{debug}{Logical. If \code{TRUE} some debugging information are printed}
        debug = FALSE,
        ## ! \item{only.hyperparam}{Binary variable indicating wheather only the
        ## ! results for the hyperparameters should be collected}
        only.hyperparam = FALSE,
        ## ! \item{file.log}{Character. The filename, if any, of the logfile for
        ## ! the internal calculations}
        file.log = NULL,
        ## ! \item{file.log2}{Character. The filename, if any, of the logfile2 for
        ## ! the internal calculations}
        file.log2 = NULL) {
## ! }
## ! \value{ The function returns an object of class \code{"inla"}, see the
## ! help file for \code{inla} for details.}
## !
## ! \details{This function is mainly used inside \code{inla}
## ! to collect results after running the inla
## ! function. It can also be used to collect results into R after having
## ! runned a inla section outside R. }
    ## sanity check: 'results.dir' must be an existing directory
    if (is.na(file.info(results.dir)$isdir) ||
        !file.info(results.dir)$isdir) {
        stop(paste("This is not a directory: ", results.dir, "\n"))
    }
    ## the inla-program writes the marker-file '.ok' on successful
    ## completion; its absence in both candidate directories is taken as
    ## a crash of the inla-program
    filename <- paste(results.dir, "/.ok", sep = "")
    res.ok <- file.exists(filename)
    if (!res.ok) {
        ## try this one instead
        results.dir.new <- paste(results.dir, "/results.files", sep = "")
        filename <- paste(results.dir.new, "/.ok", sep = "")
        res.ok <- file.exists(filename)
        if (res.ok) {
            if (debug) {
                cat(paste("inla.collect.results: retry with directory", results.dir.new, "\n"))
            }
            ## recurse into the subdirectory that does contain results
            return(inla.collect.results(results.dir.new,
                control.results = control.results,
                debug = debug,
                only.hyperparam = only.hyperparam,
                file.log = file.log,
                file.log2 = file.log2
            ))
        } else {
            ## neither directories contain the file /.ok, then we
            ## assume the inla-program has crashed
            inla.inlaprogram.has.crashed()
        }
    }
    ## latent-field related output; skipped when only the
    ## hyperparameter results are wanted
    if (!only.hyperparam) {
        res.fixed <- inla.collect.fixed(results.dir, debug)
        res.lincomb <- inla.collect.lincomb(results.dir, debug, derived = FALSE)
        res.lincomb.derived <- inla.collect.lincomb(results.dir, debug, derived = TRUE)
        res.dic <- inla.collect.dic(results.dir, debug)
        res.cpo.pit <- inla.collect.cpo(results.dir, debug)
        res.po <- inla.collect.po(results.dir, debug)
        res.waic <- inla.collect.waic(results.dir, debug)
        res.random <- inla.collect.random(results.dir, control.results$return.marginals.random, debug)
        res.predictor <- inla.collect.predictor(results.dir, control.results$return.marginals.predictor, debug)
        res.spde2.blc <- inla.collect.spde2.blc(results.dir, control.results$return.marginals.random, debug)
        res.spde3.blc <- inla.collect.spde3.blc(results.dir, control.results$return.marginals.random, debug)
        file <- paste(results.dir, .Platform$file.sep, "neffp", .Platform$file.sep, "neffp.dat", sep = "")
        neffp <- matrix(inla.read.binary.file(file), 3, 1)
        ## NOTE(review): "Expectected" is misspelt, but it is a
        ## user-visible rowname so changing it could break downstream
        ## matching -- left as-is
        rownames(neffp) <- inla.trim(c(
            "Expectected number of parameters",
            "Stdev of the number of parameters",
            "Number of equivalent replicates"
        ))
    } else {
        res.fixed <- NULL
        res.lincomb <- NULL
        res.lincomb.derived <- NULL
        res.dic <- NULL
        res.cpo.pit <- NULL
        res.po <- NULL
        res.waic <- NULL
        res.random <- NULL
        res.predictor <- NULL
        res.spde2.blc <- NULL
        res.spde3.blc <- NULL
        neffp <- NULL
    }
    ## output that is collected in any case
    res.mlik <- inla.collect.mlik(results.dir, debug)
    res.q <- inla.collect.q(results.dir, debug)
    res.graph <- inla.collect.graph(results.dir, debug)
    res.offset <- inla.collect.offset.linear.predictor(results.dir, debug)
    ## get the hyperparameters; the first element of the binary files is
    ## a length/header entry and is dropped
    theta.mode <- inla.read.binary.file(paste(results.dir, .Platform$file.sep, ".theta_mode", sep = ""))[-1]
    x.mode <- inla.read.binary.file(paste(results.dir, .Platform$file.sep, ".x_mode", sep = ""))[-1]
    gitid <- readLines(paste(results.dir, .Platform$file.sep, ".gitid", sep = ""))
    ## per-observation link-function info, if present: the names file
    ## lists the link names and the binary file maps each data-point to
    ## a (1-based) index into that list (NaN = no link)
    lfn.fnm <- paste(results.dir, .Platform$file.sep, "linkfunctions.names", sep = "")
    if (file.exists(lfn.fnm)) {
        linkfunctions.names <- readLines(lfn.fnm)
        fp <- file(paste(results.dir, .Platform$file.sep, "linkfunctions.link", sep = ""), "rb")
        n <- readBin(fp, integer(), 1)
        idx <- readBin(fp, double(), n)
        ok <- which(!is.nan(idx))
        idx[ok] <- idx[ok] + 1
        close(fp)
        linkfunctions <- list(names = linkfunctions.names, link = as.integer(idx))
    } else {
        linkfunctions <- NULL
    }
    ## hyperparameter summaries/marginals, and the joint density over
    ## theta if it was computed
    if (length(theta.mode) > 0) {
        res.hyper <- inla.collect.hyperpar(results.dir, debug)
        ## get the joint (if printed)
        alldir <- dir(results.dir)
        if (length(grep("joint.dat", alldir)) == 1) {
            if (debug) {
                print("inla.collect.joint hyperpar")
            }
            fnm <- paste(results.dir, "/joint.dat", sep = "")
            if (file.info(fnm)$size > 0) {
                joint.hyper <- read.table(fnm)
            } else {
                joint.hyper <- NULL
            }
        } else {
            joint.hyper <- NULL
        }
    } else {
        res.hyper <- NULL
        joint.hyper <- NULL
    }
    ## concatenate the two logfiles (if any) with a '***' separator
    logfile <- list(logfile = c(
        inla.collect.logfile(file.log, debug)$logfile,
        "", paste(rep("*", 72), sep = "", collapse = ""), "",
        inla.collect.logfile(file.log2, debug)$logfile
    ))
    misc <- inla.collect.misc(results.dir, debug)
    theta.tags <- NULL
    mode.status <- NA
    if (!is.null(misc)) {
        ## put also theta.mode in here
        misc$theta.mode <- theta.mode
        ## we need theta.tags for later usage
        if (!is.null(misc$theta.tags)) {
            theta.tags <- misc$theta.tags
        }
        mode.status <- misc$mode.status
        ## attach the lincomb names as dimnames on the derived
        ## correlation/covariance matrices, or drop the matrices if
        ## there are no derived lincombs
        if (!is.null(misc$lincomb.derived.correlation.matrix)) {
            if (!is.null(res.lincomb.derived)) {
                id <- res.lincomb.derived$summary.lincomb.derived$ID
                tag <- rownames(res.lincomb.derived$summary.lincomb.derived)
                R <- misc$lincomb.derived.correlation.matrix
                rownames(R) <- colnames(R) <- tag[id]
                misc$lincomb.derived.correlation.matrix <- R
            } else {
                misc$lincomb.derived.correlation.matrix <- NULL
            }
        }
        if (!is.null(misc$lincomb.derived.covariance.matrix)) {
            if (!is.null(res.lincomb.derived)) {
                id <- res.lincomb.derived$summary.lincomb.derived$ID
                tag <- rownames(res.lincomb.derived$summary.lincomb.derived)
                R <- misc$lincomb.derived.covariance.matrix
                rownames(R) <- colnames(R) <- tag[id]
                misc$lincomb.derived.covariance.matrix <- R
            } else {
                misc$lincomb.derived.covariance.matrix <- NULL
            }
        }
        ## also put the linkfunctions here
        misc$linkfunctions <- linkfunctions
        if (!is.null(linkfunctions)) {
            ## a better name
            misc$family <- linkfunctions$link
        }
    }
    ## add the names of the theta's here, as they are available.
    if (!is.null(misc) && !is.null(joint.hyper)) {
        colnames(joint.hyper) <- c(misc$theta.tags, "Log posterior density")
    }
    names(theta.mode) <- theta.tags
    ## assemble everything into one flat list; this is the 'inla'-object
    ## returned to the user
    res <- c(res.fixed, res.lincomb, res.lincomb.derived, res.mlik,
        list(cpo = res.cpo.pit), list(po = res.po), list(waic = res.waic),
        res.random, res.predictor, res.hyper,
        res.offset, res.spde2.blc, res.spde3.blc, logfile,
        list(
            misc = misc,
            dic = res.dic, mode = list(
                theta = theta.mode, x = x.mode,
                theta.tags = theta.tags, mode.status = mode.status,
                log.posterior.mode = misc$log.posterior.mode
            ),
            neffp = neffp,
            joint.hyper = joint.hyper, nhyper = length(theta.mode),
            version = list(inla.call = gitid, R.INLA = inla.version("version"))
        ),
        list(Q = res.q),
        res.graph,
        ok = res.ok
    )
    class(res) <- "inla"
    ## experimental: tag every marginal with the classes
    ## 'inla.marginal(s)' plus an 'inla.tag' attribute used for plotting
    if (inla.getOption("internal.experimental.mode")) {
        if (debug) {
            print("...Fix marginals")
        }
        ## set the inla.marginal class to all the marginals, and add tag
        ## used for plotting. all these have two levels:
        idxs <- grep("marginals[.](fixed|linear[.]predictor|lincomb[.]derived|lincomb|hyperpar|fitted[.]values)", names(res))
        if (length(idxs) > 0) {
            for (idx in idxs) {
                if (!is.null(res[[idx]])) {
                    name.1 <- names(res)[idx]
                    attr(res[[idx]], "inla.tag") <- name.1
                    class(res[[idx]]) <- "inla.marginals"
                    if (length(res[[idx]]) > 0) {
                        for (i in 1:length(res[[idx]])) {
                            name.2 <- names(res[[idx]])[i]
                            if (!is.null(res[[idx]][[i]])) {
                                attr(res[[idx]][[i]], "inla.tag") <- paste(name.1, name.2)
                                class(res[[idx]][[i]]) <- "inla.marginal"
                            }
                        }
                    }
                }
            }
        }
        if (debug) {
            print("...Fix marginals 1")
        }
        ## all these have three levels:
        idxs <- grep("marginals[.]random", names(res))
        if (length(idxs) > 0) {
            for (idx in idxs) {
                if (!is.null(res[[idx]])) {
                    name.1 <- names(res)[idx]
                    name.2 <- names(res[[idx]])
                    if (length(res[[idx]]) > 0) {
                        for (i in 1:length(res[[idx]])) {
                            name.3 <- name.2[i]
                            name.4 <- names(res[[idx]][[i]])
                            attr(res[[idx]][[i]], "inla.tag") <- paste(name.1, name.3)
                            class(res[[idx]][[i]]) <- "inla.marginals"
                            if (length(res[[idx]][[i]]) > 0) {
                                for (j in 1:length(res[[idx]][[i]])) {
                                    name.5 <- name.4[j]
                                    if (!is.null(res[[idx]][[i]][[j]])) {
                                        attr(res[[idx]][[i]][[j]], "inla.tag") <- paste(name.1, name.3, name.5)
                                        class(res[[idx]][[i]][[j]]) <- "inla.marginal"
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        if (debug) {
            print("...Fix marginals done.")
        }
    }
    return(res)
}
## Global switch for the experimental marginal-tagging code paths in
## inla.collect.fixed() and inla.collect.lincomb(); disabled for the moment.
inla.internal.experimental.mode <- FALSE
`inla.collect.misc` <- function(dir, debug = FALSE) {
    ## Collect the contents of the 'misc' subdirectory of 'dir' as
    ## written by the inla-program: theta tags and their to/from
    ## transformation functions, the internal-scale covariance and
    ## correlation matrices (plus eigen-decomposition), the reordering,
    ## stdev-correction factors, derived-lincomb correlation/covariance
    ## matrices, optimiser directions, mode status, and (optionally) the
    ## full set of 'configs' used by inla.posterior.sample().
    ## Returns NULL if the 'misc' directory does not exist.
    d <- paste(dir, "/misc", sep = "")
    d.info <- file.info(d)$isdir
    if (debug) {
        print(paste("collect misc from", d))
    }
    if (is.na(d.info) || (d.info == FALSE)) {
        return(NULL)
    }
    ## tags (names) for the hyperparameters in internal scale
    fnm <- paste(d, "/theta-tags", sep = "")
    if (file.exists(fnm)) {
        tags <- readLines(fnm)
    } else {
        tags <- NULL
    }
    ## 'from'/'to' transformations are stored as R source code, one
    ## function per line, and are parsed into callable functions here
    fnm <- paste(d, "/theta-from", sep = "")
    if (file.exists(fnm)) {
        theta.from <- readLines(fnm)
        ## evaluate these as functions
        theta.from <- lapply(theta.from, inla.source2function)
        if (!is.null(tags)) {
            names(theta.from) <- tags
        }
    } else {
        theta.from <- NULL
    }
    fnm <- paste(d, "/theta-to", sep = "")
    if (file.exists(fnm)) {
        theta.to <- readLines(fnm)
        ## evaluate these as functions
        theta.to <- lapply(theta.to, inla.source2function)
        if (!is.null(tags)) {
            names(theta.to) <- tags
        }
    } else {
        theta.to <- NULL
    }
    ## covariance of theta in the internal scale; file layout is
    ## c(n, <n x n matrix column-major>). The correlation matrix is
    ## derived from it by scaling with 1/sqrt(diag)
    fnm <- paste(d, "/covmat-hyper-internal.dat", sep = "")
    if (file.exists(fnm)) {
        siz <- inla.read.binary.file(fnm)
        n <- siz[1L]
        stopifnot(length(siz) == n^2L + 1L)
        cov.intern <- matrix(siz[-1L], n, n)
        dd <- diag(cov.intern)
        s <- matrix(0.0, n, n)
        diag(s) <- 1.0 / sqrt(dd)
        cor.intern <- s %*% cov.intern %*% s
        diag(cor.intern) <- 1.0
    } else {
        cov.intern <- NULL
        cor.intern <- NULL
    }
    fnm <- paste(d, "/covmat-eigenvectors.dat", sep = "")
    if (file.exists(fnm)) {
        siz <- inla.read.binary.file(fnm)
        n <- siz[1L]
        stopifnot(length(siz) == n^2L + 1L)
        cov.intern.eigenvectors <- matrix(siz[-1L], n, n)
    } else {
        cov.intern.eigenvectors <- NULL
    }
    fnm <- paste(d, "/covmat-eigenvalues.dat", sep = "")
    if (file.exists(fnm)) {
        siz <- inla.read.binary.file(fnm)
        n <- siz[1L]
        stopifnot(length(siz) == n + 1L)
        cov.intern.eigenvalues <- siz[-1L]
    } else {
        cov.intern.eigenvalues <- NULL
    }
    ## the reordering (permutation) used for the sparse matrices
    fnm <- paste(d, "/reordering.dat", sep = "")
    if (file.exists(fnm)) {
        r <- as.integer(inla.read.binary.file(fnm))
    } else {
        r <- NULL
    }
    fnm <- paste(d, "/stdev_corr_pos.dat", sep = "")
    if (file.exists(fnm)) {
        stdev.corr.positive <- as.numeric(inla.read.fmesher.file(fnm))
    } else {
        stdev.corr.positive <- NULL
    }
    fnm <- paste(d, "/stdev_corr_neg.dat", sep = "")
    if (file.exists(fnm)) {
        stdev.corr.negative <- as.numeric(inla.read.fmesher.file(fnm))
    } else {
        stdev.corr.negative <- NULL
    }
    fnm <- paste(d, "/lincomb_derived_correlation_matrix.dat", sep = "")
    if (file.exists(fnm)) {
        lincomb.derived.correlation.matrix <- inla.read.fmesher.file(fnm)
    } else {
        lincomb.derived.correlation.matrix <- NULL
    }
    fnm <- paste(d, "/lincomb_derived_covariance_matrix.dat", sep = "")
    if (file.exists(fnm)) {
        lincomb.derived.covariance.matrix <- inla.read.fmesher.file(fnm)
    } else {
        lincomb.derived.covariance.matrix <- NULL
    }
    ## search directions used by the optimiser; rows are thetas,
    ## columns are directions
    fnm <- paste(d, "/opt_directions.dat", sep = "")
    if (file.exists(fnm)) {
        opt.directions <- inla.read.fmesher.file(fnm)
        n <- dim(opt.directions)[1]
        colnames(opt.directions) <- paste0("dir:", 1:n)
        rownames(opt.directions) <- paste0("theta:", 1:n)
    } else {
        opt.directions <- NULL
    }
    fnm <- paste(d, "/mode-status.dat", sep = "")
    if (file.exists(fnm)) {
        mode.status <- scan(fnm, quiet = TRUE)
    } else {
        mode.status <- NA
    }
    fnm <- paste(d, "/nfunc.dat", sep = "")
    if (file.exists(fnm)) {
        nfunc <- as.numeric(scan(fnm, quiet = TRUE))
    } else {
        nfunc <- NA
    }
    fnm <- paste(d, "/log-posterior-mode.dat", sep = "")
    if (file.exists(fnm)) {
        lpm <- scan(fnm, quiet = TRUE)
    } else {
        lpm <- NA
    }
    ## the 'configs' section: a raw binary dump of every configuration
    ## visited. Header layout: (n, nz, ntheta), then the 0-based sparse
    ## pattern (i, j), nconfig, and optional linear constraints (A, e);
    ## per-config records follow in the loop below
    fnm <- paste(d, "/config/configs.dat", sep = "")
    if (file.exists(fnm)) {
        fp <- file(fnm, "rb")
        iarr <- readBin(fp, integer(), 3)
        configs <- list(
            n = iarr[1],
            nz = iarr[2],
            ntheta = iarr[3]
        )
        configs.i <- readBin(fp, integer(), configs$nz) ## 0-based
        configs.j <- readBin(fp, integer(), configs$nz) ## 0-based
        configs$nconfig <- readBin(fp, integer(), 1)
        nc <- readBin(fp, integer(), 1)
        if (nc > 0) {
            A <- readBin(fp, numeric(), configs$n * nc)
            e <- readBin(fp, numeric(), nc)
            configs$constr <- list(
                nc = nc,
                A = matrix(A, nc, configs$n),
                e = e
            )
        } else {
            configs$constr <- NULL
        }
        theta.tag <- readLines(paste(d, "/config/theta-tag.dat", sep = ""))
        ## the 'contents' map: which slice of the latent field belongs
        ## to which model component ('start' is converted to 1-based)
        configs$contents <- list(
            tag = readLines(paste(d, "/config/tag.dat", sep = "")),
            start = as.integer(readLines(paste(d, "/config/start.dat", sep = ""))) + 1L,
            length = as.integer(readLines(paste(d, "/config/n.dat", sep = "")))
        )
        if (configs$nconfig > 0L) {
            configs$config[[configs$nconfig]] <- list()
            for (k in 1L:configs$nconfig) {
                ## per-config record: log-posteriors, theta, means,
                ## skewness, offsets, then Q/Qinv on the stored pattern
                ## and the diagonal of the prior precision
                log.post <- readBin(fp, numeric(), 1)
                log.post.orig <- readBin(fp, numeric(), 1)
                if (configs$ntheta > 0L) {
                    theta <- readBin(fp, numeric(), configs$ntheta)
                    names(theta) <- theta.tag
                } else {
                    theta <- NULL
                }
                mean <- readBin(fp, numeric(), configs$n)
                improved.mean <- readBin(fp, numeric(), configs$n)
                skewness <- readBin(fp, numeric(), configs$n)
                ## add the offsets to the mean here
                offsets <- readBin(fp, numeric(), configs$n)
                mean <- mean + offsets
                improved.mean <- improved.mean + offsets
                Q <- readBin(fp, numeric(), configs$nz)
                Qinv <- readBin(fp, numeric(), configs$nz)
                Qprior <- readBin(fp, numeric(), configs$n)
                ## NOTE(review): 'configs$i'/'configs$j' are never
                ## assigned (the locals are 'configs.i'/'configs.j'),
                ## so 'dif' is always integer(0) and the transpose
                ## entries below are never added -- confirm whether
                ## 'configs.i != configs.j' was intended
                dif <- which(configs$i != configs$j)
                if (length(dif) > 0L) {
                    iadd <- configs.j[dif] ## yes, its the transpose part
                    jadd <- configs.i[dif] ## yes, its the transpose part
                    Qadd <- Q[dif]
                    Qinvadd <- Qinv[dif]
                } else {
                    iadd <- c()
                    jadd <- c()
                    Qadd <- c()
                    Qinvadd <- c()
                }
                configs$config[[k]] <- list(
                    theta = theta,
                    log.posterior = log.post,
                    log.posterior.orig = log.post.orig,
                    mean = mean,
                    improved.mean = improved.mean,
                    skewness = skewness,
                    Q = sparseMatrix(
                        i = c(configs.i, iadd),
                        j = c(configs.j, jadd),
                        x = c(Q, Qadd),
                        dims = c(configs$n, configs$n),
                        index1 = FALSE,
                        giveCsparse = TRUE
                    ),
                    Qinv = sparseMatrix(
                        i = c(configs.i, iadd),
                        j = c(configs.j, jadd),
                        x = c(Qinv, Qinvadd),
                        dims = c(configs$n, configs$n),
                        index1 = FALSE,
                        giveCsparse = TRUE
                    ),
                    Qprior.diag = Qprior
                )
            }
            ## rescale the log.posteriors
            configs$max.log.posterior <- max(sapply(configs$config, function(x) x$log.posterior.orig))
            for (k in 1L:configs$nconfig) {
                configs$config[[k]]$log.posterior <- configs$config[[k]]$log.posterior - configs$max.log.posterior
                configs$config[[k]]$log.posterior.orig <- configs$config[[k]]$log.posterior.orig - configs$max.log.posterior
            }
        } else {
            configs$config <- NULL
        }
        close(fp)
    } else {
        configs <- NULL
    }
    if (debug) {
        print(paste("collect misc from", d, "...done"))
    }
    return(list(
        cov.intern = cov.intern, cor.intern = cor.intern,
        cov.intern.eigenvalues = cov.intern.eigenvalues, cov.intern.eigenvectors = cov.intern.eigenvectors,
        reordering = r, theta.tags = tags, log.posterior.mode = lpm,
        stdev.corr.negative = stdev.corr.negative, stdev.corr.positive = stdev.corr.positive,
        to.theta = theta.to, from.theta = theta.from, mode.status = mode.status,
        lincomb.derived.correlation.matrix = lincomb.derived.correlation.matrix,
        lincomb.derived.covariance.matrix = lincomb.derived.covariance.matrix,
        opt.directions = opt.directions,
        configs = configs, nfunc = nfunc
    ))
}
`inla.collect.logfile` <- function(file.log = NULL, debug = FALSE) {
    ## Read a logfile written by the inla-program and return it as
    ## 'list(logfile = <character vector>)', one element per line with
    ## tabs expanded to spaces, or 'list(logfile = NULL)' when no
    ## filename is given or the file does not exist.
    if (is.null(file.log)) {
        return(list(logfile = NULL))
    }
    if (debug) {
        print(paste("Read logfile", file.log))
    }
    if (file.exists(file.log)) {
        ## replace tab with spaces.........12345678....
        return(list(logfile = gsub("\t", "        ", readLines(file.log))))
    } else {
        return(list(logfile = NULL))
    }
}
`inla.collect.size` <- function(dir, debug = FALSE) {
    ## Read 'dir/size.dat' (written by the inla-program) and return the
    ## component sizes as a named list:
    ##   n      : length of the component
    ##   N      : defaults to 'n' when unset (NA or <= 0)
    ##   Ntotal : defaults to 'N' when unset
    ##   ngroup : defaults to 1L when unset
    ##   nrep   : defaults to 1L when unset
    ## Fix: a missing/garbled size-file used to return the bare vector
    ## 'rep(0L, 5)', an inconsistent return type on which '$n' etc.
    ## fail; return the same zero sizes as a named list instead
    ## (backward compatible: '[[i]]' still yields 0L).
    fnm <- paste(dir, "/size.dat", sep = "")
    siz <- inla.read.binary.file(fnm)
    if (length(siz) != 5L) {
        return(list(n = 0L, N = 0L, Ntotal = 0L, ngroup = 0L, nrep = 0L))
    }
    if (is.na(siz[1L]) || siz[1L] < 0L) stop("siz[1L] = NA")
    ## fall back on earlier entries when later ones are unset
    if (is.na(siz[2L]) || siz[2L] <= 0L) siz[2L] <- siz[1L]
    if (is.na(siz[3L]) || siz[3L] <= 0L) siz[3L] <- siz[2L]
    if (is.na(siz[4L]) || siz[4L] <= 0L) siz[4L] <- 1L
    if (is.na(siz[5L]) || siz[5L] <= 0L) siz[5L] <- 1L
    return(list(n = siz[1L], N = siz[2L], Ntotal = siz[3L], ngroup = siz[4L], nrep = siz[5L]))
}
`inla.collect.hyperid` <- function(dir, debug = FALSE) {
    ## Read the hyperid identifier(s) stored by the inla-program in
    ## 'dir/hyperid.dat' and return them as a character vector, one
    ## element per line. The 'debug' argument is accepted for interface
    ## consistency with the other collectors but is not used.
    path <- file.path(dir, "hyperid.dat")
    readLines(path)
}
`inla.collect.fixed` <- function(results.dir, debug = FALSE) {
    ## Collect the results for the fixed effects (and the intercept)
    ## from the 'fixed.effect*' / 'intercept' subdirectories of
    ## 'results.dir'. Returns a list with 'names.fixed',
    ## 'summary.fixed' (a data.frame with one row per effect) and
    ## 'marginals.fixed' (a list of (x, y) matrices); all NULL when
    ## there are no fixed effects.
    alldir <- dir(results.dir)
    if (debug) {
        print("collect fixed effects")
    }
    ## read FIXED EFFECTS
    fix <- alldir[grep("^fixed.effect", alldir)]
    fix <- c(fix, alldir[grep("^intercept$", alldir)])
    n.fix <- length(fix)
    ## read the names of the fixed effects
    if (n.fix > 0L) {
        names.fixed <- inla.trim(character(n.fix))
        for (i in 1L:n.fix) {
            ## the effect's display name is the first line of its TAG file
            tag <- paste(results.dir, .Platform$file.sep, fix[i], .Platform$file.sep, "TAG", sep = "")
            if (!file.exists(tag)) {
                names.fixed[i] <- "NameMissing"
            } else {
                names.fixed[i] <- readLines(tag, n = 1L)
            }
        }
        ## read summary the fixed effects
        if (debug) {
            print(names.fixed)
        }
        summary.fixed <- numeric()
        marginals.fixed <- list()
        marginals.fixed[[n.fix]] <- NA
        for (i in 1L:n.fix) {
            ## the summary column names are established from the files
            ## of the first effect only; all effects are assumed to
            ## provide the same set of files
            first.time <- (i == 1L)
            file <- paste(results.dir, .Platform$file.sep, fix[i], sep = "")
            dir.fix <- dir(file)
            if (length(dir.fix) > 3L) {
                summ <- inla.read.binary.file(paste(file, .Platform$file.sep, "summary.dat", sep = ""))[-1L]
                if (first.time) {
                    col.nam <- c("mean", "sd")
                }
                ## read quantiles if existing
                if (length(grep("^quantiles.dat$", dir.fix)) > 0L) {
                    qq <- inla.interpret.vector(inla.read.binary.file(paste(file, .Platform$file.sep, "quantiles.dat", sep = "")),
                        debug = debug
                    )
                    summ <- c(summ, qq[, 2L])
                    if (first.time) {
                        col.nam <- c(col.nam, paste(as.character(qq[, 1L]), "quant", sep = ""))
                    }
                }
                ## read mode if existing
                if (length(grep("^mode.dat$", dir.fix)) > 0L) {
                    mm <- inla.interpret.vector(inla.read.binary.file(paste(file, .Platform$file.sep, "mode.dat", sep = "")),
                        debug = debug
                    )
                    summ <- c(summ, mm[, 2L])
                    if (first.time) {
                        col.nam <- c(col.nam, "mode")
                    }
                }
                ## read cdf values if existing
                if (length(grep("^cdf.dat$", dir.fix)) > 0L) {
                    qq <- inla.interpret.vector(inla.read.binary.file(paste(file, .Platform$file.sep, "cdf.dat", sep = "")),
                        debug = debug
                    )
                    summ <- c(summ, qq[, 2L])
                    if (first.time) {
                        col.nam <- c(col.nam, paste(as.character(qq[, 1L]), "cdf", sep = ""))
                    }
                }
                ## read also kld distance
                kld.fixed <- inla.read.binary.file(paste(file, .Platform$file.sep, "symmetric-kld.dat", sep = ""))[-1L]
                summ <- c(summ, kld.fixed)
                if (first.time) {
                    col.nam <- c(col.nam, "kld")
                }
                summary.fixed <- rbind(summary.fixed, summ)
                ## read the marginals
                xx <- inla.interpret.vector(inla.read.binary.file(paste(file, .Platform$file.sep, "marginal-densities.dat", sep = "")),
                    debug = debug
                )
                if (is.null(xx)) {
                    xx <- cbind(c(NA, NA, NA), c(NA, NA, NA))
                }
                colnames(xx) <- c("x", "y")
                marginals.fixed[[i]] <- xx
                if (inla.internal.experimental.mode) {
                    class(marginals.fixed[[i]]) <- "inla.marginal"
                    attr(marginals.fixed[[i]], "inla.tag") <- paste("marginal fixed", names.fixed[i])
                }
            } else {
                ## too few files for this effect: fill in NA
                ## placeholders so row/name bookkeeping stays aligned
                if (first.time) {
                    col.nam <- c("mean", "sd", "kld")
                }
                summary.fixed <- rbind(summary.fixed, c(NA, NA, NA))
                xx <- cbind(c(NA, NA, NA), c(NA, NA, NA))
                colnames(xx) <- c("x", "y")
                marginals.fixed[[i]] <- xx
                if (inla.internal.experimental.mode) {
                    class(marginals.fixed[[i]]) <- "inla.marginal"
                    attr(marginals.fixed[[i]], "inla.tag") <- paste("marginal fixed", names.fixed[i])
                }
            }
        }
        rownames(summary.fixed) <- names.fixed
        colnames(summary.fixed) <- col.nam
        if (length(marginals.fixed) > 0L) {
            names(marginals.fixed) <- names.fixed
        }
    } else {
        if (debug) {
            print("No fixed effects")
        }
        names.fixed <- NULL
        summary.fixed <- NULL
        marginals.fixed <- NULL
    }
    if (inla.internal.experimental.mode) {
        class(marginals.fixed) <- "inla.marginals"
        ## NOTE(review): this 3-argument attr() call is a getter, not an
        ## assignment, so it has no effect -- presumably
        ## 'attr(marginals.fixed, "inla.tag") <- "marginals fixed"' was
        ## intended; harmless as this branch is currently disabled
        attr(marginals.fixed, "inla.tag", "marginals fixed")
    }
    ret <- list(
        names.fixed = names.fixed,
        summary.fixed = as.data.frame(summary.fixed),
        marginals.fixed = marginals.fixed
    )
    return(ret)
}
`inla.collect.lincomb` <-
    function(results.dir,
             debug = FALSE,
             derived = TRUE) {
        ## Collect the results for the linear combinations from
        ## 'results.dir'. With 'derived = TRUE' only the
        ## 'lincomb...derived.all' directory is used; otherwise the
        ## remaining 'lincomb*' directories. Returns a list with the
        ## summary data.frame, the marginals (or NULL) and the size
        ## info, named '*.derived' when 'derived = TRUE'.
        ## rewrite from collect.random
        alldir <- dir(results.dir)
        if (derived) {
            lincomb <- alldir[grep("^lincomb.*derived[.]all", alldir)]
        } else {
            ## the non-derived lincombs are all 'lincomb*' directories
            ## minus the derived ones
            lincomb1 <- alldir[grep("^lincomb.*derived[.]all", alldir)]
            lincomb2 <- alldir[grep("^lincomb", alldir)]
            lincomb <- setdiff(lincomb2, lincomb1)
            if (debug) {
                print(paste("lincomb", lincomb))
            }
        }
        n.lincomb <- length(lincomb)
        if (debug) {
            print("collect lincombs")
        }
        ## read the names and model of the lincomb effects
        if (n.lincomb > 0L) {
            names.lincomb <- character(n.lincomb)
            model.lincomb <- inla.trim(character(n.lincomb))
            summary.lincomb <- list()
            summary.lincomb[[n.lincomb]] <- NA
            marginals.lincomb <- list()
            marginals.lincomb[[n.lincomb]] <- NA
            size.lincomb <- list()
            size.lincomb[[n.lincomb]] <- NA
            for (i in 1L:n.lincomb) {
                if (debug) {
                    print(paste("read lincomb ", i, " of ", n.lincomb))
                }
                ## read the summary
                file <- paste(results.dir, .Platform$file.sep, lincomb[i], sep = "")
                dir.lincomb <- dir(file)
                if (debug) {
                    print(paste("read from dir ", file))
                }
                if (length(dir.lincomb) > 4L) {
                    ## summary.dat holds (ID, mean, sd) triplets
                    dd <- matrix(inla.read.binary.file(file = paste(file, .Platform$file.sep, "summary.dat", sep = "")),
                        ncol = 3L, byrow = TRUE
                    )
                    col.nam <- c("ID", "mean", "sd")
                    ## read quantiles if existing
                    if (debug) {
                        cat("...quantiles.dat if any\n")
                    }
                    if (length(grep("^quantiles.dat$", dir.lincomb)) == 1L) {
                        xx <- inla.interpret.vector(inla.read.binary.file(paste(file, .Platform$file.sep,
                            "quantiles.dat",
                            sep = ""
                        )), debug = debug)
                        len <- dim(xx)[2L]
                        qq <- xx[, seq(2L, len, by = 2L), drop = FALSE]
                        col.nam <- c(col.nam, paste(as.character(xx[, 1L]), "quant", sep = ""))
                        dd <- cbind(dd, t(qq))
                    }
                    ## read mode if existing
                    if (length(grep("^mode.dat$", dir.lincomb)) > 0L) {
                        mm <- inla.interpret.vector(inla.read.binary.file(paste(file, .Platform$file.sep, "mode.dat", sep = "")),
                            debug = debug
                        )
                        len <- dim(mm)[2L]
                        qq <- mm[, seq(2L, len, by = 2L), drop = FALSE]
                        dd <- cbind(dd, t(qq))
                        col.nam <- c(col.nam, "mode")
                    }
                    ## read cdf if existing
                    if (debug) {
                        cat("...cdf.dat if any\n")
                    }
                    if (length(grep("^cdf.dat$", dir.lincomb)) == 1L) {
                        xx <- inla.interpret.vector(inla.read.binary.file(paste(file, .Platform$file.sep, "cdf.dat", sep = "")),
                            debug = debug
                        )
                        len <- dim(xx)[2L]
                        qq <- xx[, seq(2L, len, by = 2L), drop = FALSE]
                        col.nam <- c(col.nam, paste(as.character(xx[, 1L]), " cdf", sep = ""))
                        dd <- cbind(dd, t(qq))
                    }
                    if (debug) {
                        cat("...NAMES if any\n")
                    }
                    if (length(grep("^NAMES$", dir.lincomb)) == 1L) {
                        row.names <- readLines(paste(file, .Platform$file.sep, "NAMES", sep = ""))
                        ## remove the prefix 'lincomb.' as we do not need it in the names.
                        row.names <- sapply(row.names, function(x) gsub("^lincomb[.]", "", x))
                        names(row.names) <- NULL
                    } else {
                        row.names <- NULL
                    }
                    ## read kld
                    if (debug) {
                        cat("...kld\n")
                    }
                    kld1 <- matrix(inla.read.binary.file(file = paste(file, .Platform$file.sep, "symmetric-kld.dat", sep = "")),
                        ncol = 2L, byrow = TRUE
                    )
                    qq <- kld1[, 2L, drop = FALSE]
                    dd <- cbind(dd, qq)
                    if (debug) {
                        cat("...kld done\n")
                    }
                    col.nam <- c(col.nam, "kld")
                    colnames(dd) <- col.nam
                    summary.lincomb[[i]] <- as.data.frame(dd)
                    if (!is.null(row.names)) {
                        rownames(summary.lincomb[[i]]) <- row.names
                    }
                    ## read the marginal densities, one per lincomb
                    xx <- inla.read.binary.file(paste(file, .Platform$file.sep, "marginal-densities.dat", sep = ""))
                    rr <- inla.interpret.vector.list(xx, debug = debug)
                    rm(xx)
                    if (!is.null(rr)) {
                        nd <- length(rr)
                        names(rr) <- paste("index.", as.character(1L:nd), sep = "")
                        for (j in 1L:nd) {
                            colnames(rr[[j]]) <- c("x", "y")
                            if (inla.internal.experimental.mode) {
                                class(rr[[j]]) <- "inla.marginal"
                                if (derived) {
                                    attr(rr[[j]], "inla.tag") <- paste("marginal lincomb derived", names(rr)[j])
                                } else {
                                    attr(rr[[j]], "inla.tag") <- paste("marginal lincomb", names(rr)[j])
                                }
                            }
                        }
                    }
                    marginals.lincomb[[i]] <- rr
                    if (!is.null(row.names) && (length(marginals.lincomb) > 0L)) {
                        names(marginals.lincomb[[i]]) <- row.names
                    }
                } else {
                    ## too few files: fall back to an all-NA summary of
                    ## the length given by the 'N' file (if present)
                    N.file <- paste(file, .Platform$file.sep, "N", sep = "")
                    if (!file.exists(N.file)) {
                        N <- 0L
                    } else {
                        N <- scan(file = N.file, what = numeric(0L), quiet = TRUE)
                    }
                    summary.lincomb[[i]] <- data.frame("mean" = rep(NA, N), "sd" = rep(NA, N), "kld" = rep(NA, N))
                    marginals.lincomb <- NULL
                }
                size.lincomb[[i]] <- inla.collect.size(file)
                if (inla.internal.experimental.mode) {
                    if (!is.null(marginals.lincomb)) {
                        class(marginals.lincomb[[i]]) <- "inla.marginals"
                        if (derived) {
                            attr(marginals.lincomb[[i]], "inla.tag") <- "marginal lincomb derived"
                        } else {
                            attr(marginals.lincomb[[i]], "inla.tag") <- "marginal lincomb"
                        }
                    }
                }
            }
            names(summary.lincomb) <- names.lincomb
            ## could be that marginals.lincomb is a list of lists of NULL
            if (!is.null(marginals.lincomb)) {
                if (all(sapply(marginals.lincomb, is.null))) {
                    marginals.lincomb <- NULL
                }
            }
            if (!is.null(marginals.lincomb) && (length(marginals.lincomb) > 0L)) {
                names(marginals.lincomb) <- names.lincomb
            }
        } else {
            if (debug) {
                cat("No lincomb effets\n")
            }
            summary.lincomb <- NULL
            marginals.lincomb <- NULL
            size.lincomb <- NULL
        }
        ## only the first (and in practice only) lincomb directory is
        ## returned; names depend on 'derived'
        if (derived) {
            res <- list(
                summary.lincomb.derived = as.data.frame(summary.lincomb[[1L]]),
                marginals.lincomb.derived = inla.ifelse(length(marginals.lincomb) > 0L, marginals.lincomb[[1L]], NULL),
                size.lincomb.derived = size.lincomb[[1L]]
            )
        } else {
            res <- list(
                summary.lincomb = as.data.frame(summary.lincomb[[1L]]),
                marginals.lincomb = inla.ifelse(length(marginals.lincomb) > 0L, marginals.lincomb[[1L]], NULL),
                size.lincomb = size.lincomb[[1L]]
            )
        }
        return(res)
    }
`inla.collect.cpo` <-
    function(results.dir,
             debug = FALSE) {
        ## Collect CPO/PIT results from the 'cpo' subdirectory of
        ## 'results.dir'. Each .dat file stores c(n, idx1, val1, idx2,
        ## val2, ...) with 0-based indices; entries not present stay NA.
        ## Returns list(cpo, pit, failure), each NULL when no 'cpo'
        ## directory exists.
        alldir <- dir(results.dir)
        if (length(grep("^cpo$", alldir)) == 1L) {
            if (debug) {
                cat(paste("collect cpo\n", sep = ""))
            }
            xx <- inla.read.binary.file(file = paste(results.dir, .Platform$file.sep, "cpo", .Platform$file.sep, "cpo.dat", sep = ""))
            n <- xx[1L]
            xx <- xx[-1L]
            len <- length(xx)
            cpo.res <- numeric(n)
            cpo.res[1L:n] <- NA
            ## scatter values into 1-based positions given by the stored 0-based indices
            cpo.res[xx[seq(1L, len, by = 2L)] + 1L] <- xx[seq(2L, len, by = 2L)]
            xx <- inla.read.binary.file(file = paste(results.dir, .Platform$file.sep, "cpo", .Platform$file.sep, "pit.dat", sep = ""))
            n <- xx[1L]
            xx <- xx[-1L]
            len <- length(xx)
            pit.res <- numeric(n)
            pit.res[1L:n] <- NA
            pit.res[xx[seq(1L, len, by = 2L)] + 1L] <- xx[seq(2L, len, by = 2L)]
            ## the failure indicator file is optional
            fnm <- paste(results.dir, .Platform$file.sep, "cpo", .Platform$file.sep, "failure.dat", sep = "")
            if (file.exists(fnm)) {
                xx <- inla.read.binary.file(fnm)
                n <- xx[1L]
                xx <- xx[-1L]
                len <- length(xx)
                failure.res <- numeric(n)
                failure.res[1L:n] <- NA
                failure.res[xx[seq(1L, len, by = 2L)] + 1L] <- xx[seq(2L, len, by = 2L)]
            }
            else {
                failure.res <- NULL
            }
            rm(xx)
        } else {
            cpo.res <- NULL
            pit.res <- NULL
            failure.res <- NULL
        }
        ## want NA not NaN
        cpo.res[is.nan(cpo.res)] <- NA
        pit.res[is.nan(pit.res)] <- NA
        failure.res[is.nan(failure.res)] <- NA
        return(list(cpo = cpo.res, pit = pit.res, failure = failure.res))
    }
`inla.collect.po` <-
    function(results.dir,
             debug = FALSE) {
        ## Collect the predictive-ordinate results from the 'po'
        ## subdirectory. The file stores c(n, idx1, po1, extra1, idx2,
        ## po2, extra2, ...) with 0-based indices; every third entry
        ## (used by inla.collect.waic) is dropped here. Returns
        ## list(po = <numeric or NULL>).
        alldir <- dir(results.dir)
        if (length(grep("^po$", alldir)) == 1L) {
            if (debug) {
                cat(paste("collect po\n", sep = ""))
            }
            xx <- inla.read.binary.file(file = paste(results.dir, .Platform$file.sep, "po", .Platform$file.sep, "po.dat", sep = ""))
            n <- xx[1L]
            xx <- xx[-1L]
            xx <- xx[-seq(3, length(xx), by = 3L)] ## skip entry 3, 6, 9, ...
            len <- length(xx)
            po.res <- numeric(n)
            po.res[1L:n] <- NA
            ## scatter the (0-based index, value) pairs into place
            po.res[xx[seq(1L, len, by = 2L)] + 1L] <- xx[seq(2L, len, by = 2L)]
        } else {
            po.res <- NULL
        }
        ## want NA not NaN
        po.res[is.nan(po.res)] <- NA
        return(list(po = po.res))
    }
`inla.collect.waic` <-
    function(results.dir,
             debug = FALSE) {
        ## Compute the WAIC from the 'po' subdirectory results. The
        ## po.dat file stores c(n, idx1, po1, po2_1, idx2, po1_2,
        ## po2_2, ...) with 0-based indices, where 'po' is the
        ## predictive ordinate and 'po2' the second term used for the
        ## effective number of parameters. Returns list(waic, p.eff,
        ## local.waic, local.p.eff), or NULL when no 'po' directory
        ## exists.
        ## yes, here we use the po-results!!!!
        alldir <- dir(results.dir)
        if (length(grep("^po$", alldir)) == 1L) {
            if (debug) {
                cat(paste("collect waic from po-results\n", sep = ""))
            }
            xx <- inla.read.binary.file(file = paste(results.dir, .Platform$file.sep, "po", .Platform$file.sep, "po.dat", sep = ""))
            n <- xx[1L]
            xx <- xx[-1L]
            len <- length(xx)
            po.res <- numeric(n)
            po2.res <- numeric(n)
            po.res[1L:n] <- NA
            ## scatter the stored triples: entry 2 goes to po.res,
            ## entry 3 to po2.res, at the 0-based index in entry 1
            po.res[xx[seq(1L, len, by = 3L)] + 1L] <- xx[seq(2L, len, by = 3L)]
            po2.res[1L:n] <- NA
            po2.res[xx[seq(1L, len, by = 3L)] + 1L] <- xx[seq(3L, len, by = 3L)]
            ## want NA not NaN
            po.res[is.nan(po.res)] <- NA
            po2.res[is.nan(po2.res)] <- NA
            ## compute waic
            return(list(
                waic = -2 * (sum(log(po.res), na.rm = TRUE) - sum(po2.res, na.rm = TRUE)),
                p.eff = sum(po2.res, na.rm = TRUE),
                local.waic = -2 * (log(po.res) - po2.res),
                local.p.eff = po2.res
            ))
        } else {
            return(NULL)
        }
    }
`inla.collect.dic` <-
    function(results.dir,
             debug = FALSE) {
        ## Collect the DIC results from the 'dic' subdirectory of
        ## 'results.dir': global and per-data-point DIC and effective
        ## number of parameters, both plain and saturated, plus the
        ## per-family breakdown when 'family_idx.dat' is available.
        ## Returns a list, or NULL when no 'dic' directory exists.
        ##
        ## Fix: 'fam.dic.sat' used to be assigned only inside the
        ## per-family branch but is used unconditionally in the returned
        ## list, giving "object 'fam.dic.sat' not found" when the
        ## family-index file is absent; it is now initialised together
        ## with the other per-family defaults.
        alldir <- dir(results.dir)
        ## get dic (if exists)
        if (length(grep("^dic$", alldir)) == 1L) {
            if (debug) {
                cat(paste("collect dic\n", sep = ""))
            }
            file <- paste(results.dir, .Platform$file.sep, "dic", .Platform$file.sep, "dic.dat", sep = "")
            dic.values <- inla.read.binary.file(file)
            ## per-data-point deviance(E[.]) and E[deviance], plain and
            ## saturated; each file is optional and gives NULL when absent
            file <- paste(results.dir, .Platform$file.sep, "dic", .Platform$file.sep, "deviance_e.dat", sep = "")
            if (inla.is.fmesher.file(file)) {
                dev.e <- c(inla.read.fmesher.file(file))
                dev.e[is.nan(dev.e)] <- NA
            } else {
                dev.e <- NULL
            }
            file <- paste(results.dir, .Platform$file.sep, "dic", .Platform$file.sep, "deviance_e_sat.dat", sep = "")
            if (inla.is.fmesher.file(file)) {
                dev.e.sat <- c(inla.read.fmesher.file(file))
                dev.e.sat[is.nan(dev.e.sat)] <- NA
            } else {
                dev.e.sat <- NULL
            }
            file <- paste(results.dir, .Platform$file.sep, "dic", .Platform$file.sep, "e_deviance.dat", sep = "")
            if (inla.is.fmesher.file(file)) {
                e.dev <- c(inla.read.fmesher.file(file))
                e.dev[is.nan(e.dev)] <- NA
            } else {
                e.dev <- NULL
            }
            file <- paste(results.dir, .Platform$file.sep, "dic", .Platform$file.sep, "e_deviance_sat.dat", sep = "")
            if (inla.is.fmesher.file(file)) {
                e.dev.sat <- c(inla.read.fmesher.file(file))
                e.dev.sat[is.nan(e.dev.sat)] <- NA
            } else {
                e.dev.sat <- NULL
            }
            ## which likelihood-family each data-point belongs to
            f.idx <- NULL
            file <- paste(results.dir, .Platform$file.sep, "dic", .Platform$file.sep, "family_idx.dat", sep = "")
            if (inla.is.fmesher.file(file)) {
                f.idx <- c(inla.read.fmesher.file(file)) + 1L ## convert to R-indexing
                f.idx[is.nan(f.idx)] <- NA
            }
            ## if there there is no data at all, then all dic'values are
            ## NA. the returned values are 0, so we override them here.
            if (!is.null(f.idx) && all(is.na(f.idx))) {
                dic.values[] <- NA
            }
            local.dic <- 2.0 * e.dev - dev.e
            local.dic.sat <- 2.0 * e.dev.sat - dev.e.sat
            local.p.eff <- e.dev - dev.e
            ## defaults: the global value for each per-family quantity;
            ## 'fam.dic.sat' must be initialised here as well so the
            ## return value below is defined when 'f.idx' is missing
            fam.dic <- dic.values[4L]
            fam.dic.sat <- dic.values[4L + 4L]
            fam.p.eff <- dic.values[3L]
            if (!is.null(f.idx) && !all(is.na(f.idx))) {
                n.fam <- max(f.idx, na.rm = TRUE)
                fam.dic <- numeric(n.fam)
                fam.dic.sat <- numeric(n.fam)
                fam.p.eff <- numeric(n.fam)
                for (i in seq_len(n.fam)) {
                    idx <- which(f.idx == i)
                    fam.dic[i] <- sum(local.dic[idx])
                    fam.dic.sat[i] <- sum(local.dic.sat[idx])
                    fam.p.eff[i] <- sum(local.p.eff[idx])
                }
            }
            dic <- list(
                "dic" = dic.values[4L],
                "p.eff" = dic.values[3L],
                "mean.deviance" = dic.values[1L],
                "deviance.mean" = dic.values[2L],
                "dic.sat" = dic.values[4L + 4L],
                "mean.deviance.sat" = dic.values[4L + 1L],
                "deviance.mean.sat" = dic.values[4L + 2L],
                "family.dic" = fam.dic,
                "family.dic.sat" = fam.dic.sat,
                "family.p.eff" = fam.p.eff,
                "family" = f.idx,
                "local.dic" = local.dic,
                "local.dic.sat" = local.dic.sat,
                "local.p.eff" = local.p.eff
            )
        } else {
            dic <- NULL
        }
        return(dic)
    }
`inla.collect.q` <-
    function(results.dir,
             debug = FALSE) {
        ## Collect the bitmap images of the precision matrix that the
        ## inla-program may have written in the 'Q' sub-directory:
        ## the matrix itself, the reordered matrix, and its Cholesky
        ## factor.  Returns a list(Q, Q.reorder, L) or NULL.
        read.pbm.quietly <- function(...) {
            args <- list(...)
            fnm <- args[[1]]
            if (file.exists(fnm) && inla.require("pixmap")) {
                ## silence the warnings that read.pnm may emit, and
                ## restore the old setting afterwards
                old.warn <- getOption("warn")
                options(warn = -1L)
                img <- pixmap::read.pnm(...)
                do.call("options", args = list(warn = old.warn))
            } else {
                if (file.exists(fnm)) {
                    ## the file is there but 'pixmap' is not installed
                    warning("You need to install 'pixmap' to read bitmap files.")
                }
                img <- NULL
            }
            return(img)
        }
        if (length(grep("^Q$", dir(results.dir))) == 1L) {
            if (debug) {
                cat("collect q\n")
            }
            Q.img <- read.pbm.quietly(paste0(results.dir, .Platform$file.sep, "Q/precision-matrix.pbm"))
            Q.reord.img <- read.pbm.quietly(paste0(results.dir, .Platform$file.sep, "Q/precision-matrix-reordered.pbm"))
            L.img <- read.pbm.quietly(paste0(results.dir, .Platform$file.sep, "Q/precision-matrix_L.pbm"))
            if (is.null(Q.img) && is.null(Q.reord.img) && is.null(L.img)) {
                q <- NULL
            } else {
                q <- list(Q = Q.img, Q.reorder = Q.reord.img, L = L.img)
            }
        } else {
            q <- NULL
        }
        return(q)
    }
`inla.collect.graph` <-
    function(results.dir,
             debug = FALSE) {
        ## Collect the graph specification ('graph.dat') written by the
        ## inla-program, if present.  Returns list(graph = <graph>) with
        ## the graph entry NULL when no such file exists.
        graph.obj <- NULL
        if (length(grep("^graph.dat$", dir(results.dir))) == 1L) {
            if (debug) {
                cat("collect graph\n")
            }
            graph.obj <- inla.read.graph(file.path(results.dir, "graph.dat"))
        }
        return(list(graph = graph.obj))
    }
`inla.collect.hyperpar` <-
    function(results.dir,
             debug = FALSE) {
        ## Collect summaries and marginal densities for the model
        ## hyperparameters.  Each 'hyperparameter*' sub-directory of
        ## 'results.dir' holds one hyperparameter; directories ending in
        ## 'user-scale' hold the user-scale results, the remaining ones
        ## the internal-scale results.  Returns a list with
        ## (internal.)summary.hyperpar and (internal.)marginals.hyperpar.
        alldir <- dir(results.dir)
        all.hyper <- alldir[grep("^hyperparameter", alldir)]
        hyper <- all.hyper[grep("user-scale$", all.hyper)]
        n.hyper <- length(hyper)
        if (n.hyper > 0L) {
            ## get names for hyperpar
            names.hyper <- character(n.hyper)
            for (i in 1L:n.hyper) {
                tag <- paste(results.dir, .Platform$file.sep, hyper[i], .Platform$file.sep, "TAG", sep = "")
                if (!file.exists(tag)) {
                    names.hyper[i] <- "missing NAME"
                } else {
                    names.hyper[i] <- readLines(tag, n = 1L)
                }
            }
            ## get summary and marginals
            summary.hyper <- numeric()
            marginal.hyper <- list()
            marginal.hyper[[n.hyper]] <- NA
            for (i in 1L:n.hyper) {
                first.time <- (i == 1L)
                dir.hyper <- paste(results.dir, .Platform$file.sep, hyper[i], sep = "")
                file <- paste(dir.hyper, .Platform$file.sep, "summary.dat", sep = "")
                hyperid <- inla.collect.hyperid(dir.hyper)
                ## first entry of summary.dat is an index; drop it
                dd <- inla.read.binary.file(file)[-1L]
                summ <- dd
                if (first.time) {
                    col.nam <- c("mean", "sd")
                }
                if (length(grep("^quantiles.dat$", dir(dir.hyper))) > 0L) {
                    qq <- inla.interpret.vector(inla.read.binary.file(paste(dir.hyper, .Platform$file.sep, "quantiles.dat", sep = "")),
                        debug = debug
                    )
                    summ <- c(summ, qq[, 2L])
                    if (first.time) {
                        col.nam <- c(col.nam, paste(as.character(qq[, 1L]), "quant", sep = ""))
                    }
                }
                if (length(grep("^mode.dat$", dir(dir.hyper))) > 0L) {
                    qq <- inla.interpret.vector(inla.read.binary.file(paste(dir.hyper, .Platform$file.sep, "mode.dat", sep = "")),
                        debug = debug
                    )
                    summ <- c(summ, qq[, 2L])
                    if (first.time) {
                        col.nam <- c(col.nam, "mode")
                    }
                }
                if (length(grep("^cdf.dat$", dir(dir.hyper))) > 0L) {
                    qq <- inla.interpret.vector(inla.read.binary.file(paste(dir.hyper, .Platform$file.sep, "cdf.dat", sep = "")),
                        debug = debug
                    )
                    summ <- c(summ, qq[, 2L])
                    if (first.time) {
                        col.nam <- c(col.nam, paste(as.character(qq[, 1L]), "cdf", sep = ""))
                    }
                }
                ## n.hyper is small, so growing by rbind() is acceptable
                summary.hyper <- rbind(summary.hyper, summ)
                file <- paste(results.dir, .Platform$file.sep, hyper[i], .Platform$file.sep, "marginal-densities.dat", sep = "")
                xx <- inla.read.binary.file(file)
                marg1 <- inla.interpret.vector(xx, debug = debug)
                attr(marg1, "hyperid") <- hyperid
                rm(xx)
                if (!is.null(marg1)) {
                    colnames(marg1) <- c("x", "y")
                }
                if (inla.internal.experimental.mode) {
                    class(marg1) <- "inla.marginal"
                    attr(marg1, "inla.tag") <- paste("marginal hyper", names.hyper[i])
                }
                marginal.hyper[[i]] <- marg1
            }
            names(marginal.hyper) <- names.hyper
            rownames(summary.hyper) <- names.hyper
            colnames(summary.hyper) <- col.nam
        } else {
            marginal.hyper <- NULL
            summary.hyper <- NULL
        }
        if (inla.internal.experimental.mode) {
            if (!is.null(marginal.hyper)) {
                class(marginal.hyper) <- "inla.marginals"
                attr(marginal.hyper, "inla.tag") <- "marginal hyper"
            }
        }
        ## collect also the hyperparameters in the internal scale.
        ## (bugfix: use '!grepl' and not '-grep'.  when no directory
        ## matches, 'grep' returns integer(0) and
        ## 'all.hyper[-integer(0)]' selects *nothing*, silently dropping
        ## all internal-scale hyperparameters.)
        all.hyper <- alldir[grep("^hyperparameter", alldir)]
        hyper <- all.hyper[!grepl("user-scale$", all.hyper)]
        n.hyper <- length(hyper)
        if (n.hyper > 0L) {
            ## get names for hyperpar
            names.hyper <- character(n.hyper)
            for (i in 1L:n.hyper) {
                tag <- paste(results.dir, .Platform$file.sep, hyper[i], .Platform$file.sep, "TAG", sep = "")
                if (!file.exists(tag)) {
                    names.hyper[i] <- "missing NAME"
                } else {
                    names.hyper[i] <- readLines(tag, n = 1L)
                }
            }
            ## get summary and marginals
            internal.summary.hyper <- numeric()
            internal.marginal.hyper <- list()
            internal.marginal.hyper[[n.hyper]] <- NA
            for (i in 1L:n.hyper) {
                first.time <- (i == 1L)
                dir.hyper <- paste(results.dir, .Platform$file.sep, hyper[i], sep = "")
                file <- paste(dir.hyper, .Platform$file.sep, "summary.dat", sep = "")
                hyperid <- inla.collect.hyperid(dir.hyper)
                dd <- inla.read.binary.file(file)[-1L]
                summ <- dd
                if (first.time) {
                    col.nam <- c("mean", "sd")
                }
                if (length(grep("^quantiles.dat$", dir(dir.hyper))) > 0L) {
                    qq <- inla.interpret.vector(inla.read.binary.file(paste(dir.hyper, .Platform$file.sep, "quantiles.dat", sep = "")),
                        debug = debug
                    )
                    summ <- c(summ, qq[, 2L])
                    if (first.time) {
                        col.nam <- c(col.nam, paste(as.character(qq[, 1L]), "quant", sep = ""))
                    }
                }
                if (length(grep("^mode.dat$", dir(dir.hyper))) > 0L) {
                    qq <- inla.interpret.vector(inla.read.binary.file(paste(dir.hyper, .Platform$file.sep, "mode.dat", sep = "")),
                        debug = debug
                    )
                    summ <- c(summ, qq[, 2L])
                    if (first.time) {
                        col.nam <- c(col.nam, "mode")
                    }
                }
                if (length(grep("^cdf.dat$", dir(dir.hyper))) > 0L) {
                    qq <- inla.interpret.vector(inla.read.binary.file(paste(dir.hyper, .Platform$file.sep, "cdf.dat", sep = "")),
                        debug = debug
                    )
                    summ <- c(summ, qq[, 2L])
                    if (first.time) {
                        col.nam <- c(col.nam, paste(as.character(qq[, 1L]), "cdf", sep = ""))
                    }
                }
                ## here the summary matrix is preallocated; the number
                ## of columns is known after the first iteration
                if (first.time) {
                    internal.summary.hyper <- matrix(NA, n.hyper, length(summ))
                }
                internal.summary.hyper[i, ] <- summ
                file <- paste(results.dir, .Platform$file.sep, hyper[i], .Platform$file.sep, "marginal-densities.dat", sep = "")
                xx <- inla.read.binary.file(file)
                marg1 <- inla.interpret.vector(xx, debug = debug)
                attr(marg1, "hyperid") <- hyperid
                rm(xx)
                if (!is.null(marg1)) {
                    colnames(marg1) <- c("x", "y")
                }
                if (inla.internal.experimental.mode) {
                    class(marg1) <- "inla.marginal"
                    attr(marg1, "inla.tag") <- paste("marginal hyper internal", names.hyper[i])
                }
                internal.marginal.hyper[[i]] <- marg1
            }
            names(internal.marginal.hyper) <- names.hyper
            rownames(internal.summary.hyper) <- names.hyper
            colnames(internal.summary.hyper) <- col.nam
        } else {
            internal.summary.hyper <- NULL
            internal.marginal.hyper <- NULL
        }
        if (inla.internal.experimental.mode) {
            if (!is.null(internal.marginal.hyper)) {
                class(internal.marginal.hyper) <- "inla.marginals"
                attr(internal.marginal.hyper, "inla.tag") <- "marginal hyper internal"
            }
        }
        ret <- list(
            summary.hyperpar = as.data.frame(summary.hyper),
            marginals.hyperpar = marginal.hyper,
            internal.summary.hyperpar = as.data.frame(internal.summary.hyper),
            internal.marginals.hyperpar = internal.marginal.hyper
        )
        return(ret)
    }
`inla.collect.mlik` <-
    function(results.dir,
             debug = FALSE) {
        ## Collect the two log marginal-likelihood estimates (the
        ## integration-based one and the Gaussian approximation), if the
        ## inla-program wrote the 'marginal-likelihood' directory.
        ## Returns list(mlik = <2x1 matrix>) or list(mlik = NULL).
        mlik <- NULL
        if (length(grep("^marginal-likelihood$", dir(results.dir))) == 1L) {
            if (debug) {
                cat("collect mlik\n")
            }
            fnm <- file.path(results.dir, "marginal-likelihood", "marginal-likelihood.dat")
            mlik <- matrix(inla.read.binary.file(fnm), 2L, 1L)
            rownames(mlik) <- c(
                "log marginal-likelihood (integration)",
                "log marginal-likelihood (Gaussian)"
            )
        }
        return(list(mlik = mlik))
    }
## Collect the linear predictor and (when computed) the fitted values in
## the user-scale (inverse-linked linear predictor) from the output
## written by the inla-program in 'results.dir'.  Returns summaries,
## optional marginal densities, and size information.
`inla.collect.predictor` <-
    function(results.dir,
             return.marginals.predictor = TRUE,
             debug = FALSE) {
        alldir <- dir(results.dir)
        ## FIRST: get the linear predictor
        subdir <- paste(results.dir, .Platform$file.sep, "predictor", sep = "")
        ## more than 3 entries means actual results were written, not
        ## just the bookkeeping files
        if (length(dir(subdir)) > 3L) {
            if (debug) {
                cat(paste("collect linear predictor\n", sep = ""))
            }
            if (debug) {
                cat("...read summary.dat\n")
            }
            file <- paste(subdir, .Platform$file.sep, "summary.dat", sep = "")
            ## drop the first column (the index), keep mean and sd
            dd <- matrix(inla.read.binary.file(file = file), ncol = 3L, byrow = TRUE)[, -1L, drop = FALSE]
            col.nam <- c("mean", "sd")
            ## info about size
            size.info <- inla.collect.size(subdir)
            if (!is.null(size.info)) {
                ## A: an A-matrix was used, so the output holds both the
                ## nA 'APredictor' entries and the n 'Predictor' entries
                A <- (size.info$nrep == 2)
                n <- size.info$n
                nA <- size.info$Ntotal - size.info$n
            } else {
                ## should not happen
                stop("This should not happen")
            }
            ## get quantiles if computed
            if (length(grep("^quantiles.dat$", dir(subdir))) == 1L) {
                if (debug) {
                    cat("...read quantiles.dat\n")
                }
                file <- paste(subdir, .Platform$file.sep, "quantiles.dat", sep = "")
                xx <- inla.interpret.vector(inla.read.binary.file(file), debug = debug)
                ## columns alternate (quantile-level, value); take the
                ## values, and use the levels for the column names
                len <- dim(xx)[2L]
                qq <- xx[, seq(2L, len, by = 2L), drop = FALSE]
                col.nam <- c(col.nam, paste(as.character(xx[, 1L]), "quant", sep = ""))
                dd <- cbind(dd, t(qq))
                rm(xx)
            }
            if (length(grep("^mode.dat$", dir(subdir))) == 1L) {
                if (debug) {
                    cat("...read mode.dat\n")
                }
                file <- paste(subdir, .Platform$file.sep, "mode.dat", sep = "")
                xx <- inla.interpret.vector(inla.read.binary.file(file), debug = debug)
                len <- dim(xx)[2L]
                qq <- xx[, seq(2L, len, by = 2L)]
                col.nam <- c(col.nam, "mode")
                dd <- cbind(dd, qq)
                rm(xx)
            }
            ## get cdf if computed
            if (length(grep("^cdf.dat$", dir(subdir))) == 1L) {
                if (debug) {
                    cat("...read cdf.dat\n")
                }
                file <- paste(subdir, .Platform$file.sep, "cdf.dat", sep = "")
                xx <- inla.interpret.vector(inla.read.binary.file(file), debug = debug)
                len <- dim(xx)[2L]
                qq <- xx[, seq(2L, len, by = 2L), drop = FALSE]
                col.nam <- c(col.nam, paste(as.character(xx[, 1L]), " cdf", sep = ""))
                dd <- cbind(dd, t(qq))
                rm(xx)
            } else {
                if (debug) {
                    cat("... no cdf.dat\n")
                }
            }
            ## get kld (second column; the first is the index)
            if (debug) {
                cat("...read kld\n")
            }
            kld <- matrix(inla.read.binary.file(file = paste(subdir, .Platform$file.sep, "symmetric-kld.dat", sep = "")),
                ncol = 2L, byrow = TRUE
            )
            dd <- cbind(dd, kld[, 2L, drop = FALSE])
            col.nam <- c(col.nam, "kld")
            colnames(dd) <- col.nam
            summary.linear.predictor <- as.data.frame(dd)
            ## the APredictor entries (if any) come before the Predictor
            ## entries
            if (A) {
                rownames(summary.linear.predictor) <- c(
                    paste("APredictor.", inla.num(1L:nA), sep = ""),
                    paste("Predictor.", inla.num(1:n), sep = "")
                )
            } else {
                rownames(summary.linear.predictor) <- paste("Predictor.", inla.num(1L:size.info$Ntotal), sep = "")
            }
            if (return.marginals.predictor) {
                if (debug) {
                    cat("...read marginal-densities.dat\n")
                }
                file <- paste(subdir, .Platform$file.sep, "marginal-densities.dat", sep = "")
                xx <- inla.read.binary.file(file)
                rr <- inla.interpret.vector.list(xx, debug = debug)
                rm(xx)
                if (!is.null(rr)) {
                    if (A) {
                        names(rr) <- c(
                            paste("APredictor.", inla.num(1L:nA), sep = ""),
                            paste("Predictor.", inla.num(1L:n), sep = "")
                        )
                    } else {
                        names(rr) <- paste("Predictor.", as.character(1L:length(rr)), sep = "")
                    }
                    names.rr <- names(rr)
                    for (i in 1L:length(rr)) {
                        colnames(rr[[i]]) <- c("x", "y")
                        if (inla.internal.experimental.mode) {
                            class(rr[[i]]) <- "inla.marginal"
                            attr(rr[[i]], "inla.tag") <- paste("marginal linear predictor", names.rr[i])
                        }
                    }
                }
                if (inla.internal.experimental.mode) {
                    class(rr) <- "inla.marginals"
                    attr(rr, "inla.tag") <- "marginals linear predictor"
                }
                marginals.linear.predictor <- rr
            } else {
                marginals.linear.predictor <- NULL
            }
        } else {
            summary.linear.predictor <- NULL
            marginals.linear.predictor <- NULL
            size.info <- NULL
        }
        ## SECOND: get the inverse linear predictor(if computed)
        ## NOTE(review): this section reuses 'A', 'n' and 'nA' from the
        ## first section, i.e. it implicitly assumes that whenever
        ## 'predictor-user-scale' has results, the 'predictor' directory
        ## did too; otherwise these variables are undefined -- confirm.
        if (length(grep("^predictor-user-scale$", alldir)) == 1L) {
            subdir <- paste(results.dir, .Platform$file.sep, "predictor-user-scale", sep = "")
            if (length(dir(subdir)) > 3L) {
                if (debug) {
                    cat(paste("collect fitted values\n", sep = ""))
                }
                file <- paste(subdir, .Platform$file.sep, "summary.dat", sep = "")
                dd <- matrix(inla.read.binary.file(file = file), ncol = 3L, byrow = TRUE)[, -1L, drop = FALSE]
                col.nam <- c("mean", "sd")
                ## get quantiles if computed
                if (length(grep("^quantiles.dat$", dir(subdir))) == 1L) {
                    file <- paste(subdir, .Platform$file.sep, "quantiles.dat", sep = "")
                    xx <- inla.interpret.vector(inla.read.binary.file(file), debug = debug)
                    len <- dim(xx)[2L]
                    qq <- xx[, seq(2L, len, by = 2L), drop = FALSE]
                    col.nam <- c(col.nam, paste(as.character(xx[, 1L]), "quant", sep = ""))
                    dd <- cbind(dd, t(qq))
                    rm(xx)
                }
                if (length(grep("^mode.dat$", dir(subdir))) == 1L) {
                    file <- paste(subdir, .Platform$file.sep, "mode.dat", sep = "")
                    xx <- inla.interpret.vector(inla.read.binary.file(file), debug = debug)
                    len <- dim(xx)[2L]
                    qq <- xx[, seq(2L, len, by = 2L)]
                    col.nam <- c(col.nam, "mode")
                    dd <- cbind(dd, qq)
                    rm(xx)
                }
                ## get cdf if computed
                if (length(grep("^cdf.dat$", dir(subdir))) == 1L) {
                    file <- paste(subdir, .Platform$file.sep, "cdf.dat", sep = "")
                    xx <- inla.interpret.vector(inla.read.binary.file(file), debug = debug)
                    len <- dim(xx)[2L]
                    qq <- xx[, seq(2L, len, by = 2L), drop = FALSE]
                    col.nam <- c(col.nam, paste(as.character(xx[, 1L]), " cdf", sep = ""))
                    dd <- cbind(dd, t(qq))
                    rm(xx)
                }
                colnames(dd) <- col.nam
                if (A) {
                    rownames(dd) <- c(
                        paste("fitted.APredictor.", inla.num(1L:nA), sep = ""),
                        paste("fitted.Predictor.", inla.num(1L:n), sep = "")
                    )
                } else {
                    rownames(dd) <- paste("fitted.Predictor.", inla.num(1L:n), sep = "")
                }
                summary.fitted.values <- as.data.frame(dd)
                if (return.marginals.predictor) {
                    file <- paste(subdir, .Platform$file.sep, "marginal-densities.dat", sep = "")
                    xx <- inla.read.binary.file(file)
                    rr <- inla.interpret.vector.list(xx, debug = debug)
                    rm(xx)
                    if (!is.null(rr)) {
                        if (A) {
                            names(rr) <- c(
                                paste("fitted.APredictor.", inla.num(1L:nA), sep = ""),
                                paste("fitted.Predictor.", inla.num(1:n), sep = "")
                            )
                        } else {
                            names(rr) <- paste("fitted.Predictor.", inla.num(1L:length(rr)), sep = "")
                        }
                        names.rr <- names(rr)
                        for (i in 1L:length(rr)) {
                            colnames(rr[[i]]) <- c("x", "y")
                            if (inla.internal.experimental.mode) {
                                class(rr[[i]]) <- "inla.marginal"
                                attr(rr[[i]], "inla.tag") <- paste("marginal fitted values", names.rr[i])
                            }
                        }
                    }
                    if (inla.internal.experimental.mode) {
                        class(rr) <- "inla.marginals"
                        attr(rr, "inla.tag") <- "marginals fitted values"
                    }
                    marginals.fitted.values <- rr
                } else {
                    marginals.fitted.values <- NULL
                }
            } else {
                summary.fitted.values <- NULL
                marginals.fitted.values <- NULL
            }
        } else {
            summary.fitted.values <- NULL
            marginals.fitted.values <- NULL
        }
        ## note: as.data.frame(NULL) gives an empty data.frame (not
        ## NULL) when the corresponding section was skipped above
        res <- list(
            summary.linear.predictor = as.data.frame(summary.linear.predictor),
            marginals.linear.predictor = marginals.linear.predictor,
            summary.fitted.values = as.data.frame(summary.fitted.values),
            marginals.fitted.values = marginals.fitted.values,
            size.linear.predictor = size.info
        )
        return(res)
    }
`inla.collect.random` <-
    function(results.dir,
             return.marginals.random,
             debug = FALSE) {
        ## Collect summaries (and optionally marginal densities) for all
        ## random effects, i.e. the 'random.effect*' sub-directories of
        ## 'results.dir'.
        alldir <- dir(results.dir)
        random <- alldir[grep("^random.effect", alldir)]
        n.random <- length(random)
        if (debug) {
            print("collect random effects")
        }
        ## read the names and model of the random effects
        if (n.random > 0L) {
            names.random <- character(n.random)
            model.random <- inla.trim(character(n.random))
            for (i in 1L:n.random) {
                tag <- paste(results.dir, .Platform$file.sep, random[i], .Platform$file.sep, "TAG", sep = "")
                if (!file.exists(tag)) {
                    names.random[i] <- "missing NAME"
                } else {
                    names.random[i] <- readLines(tag, n = 1L)
                }
                modelname <- inla.trim(paste(results.dir, .Platform$file.sep, random[i], .Platform$file.sep, "MODEL", sep = ""))
                if (!file.exists(modelname)) {
                    model.random[i] <- "NoModelName"
                } else {
                    model.random[i] <- inla.trim(readLines(modelname, n = 1L))
                }
            }
            summary.random <- list()
            summary.random[[n.random]] <- NA
            size.random <- list()
            size.random[[n.random]] <- NA
            if (return.marginals.random) {
                marginals.random <- list()
                marginals.random[[n.random]] <- NA
            } else {
                marginals.random <- NULL
            }
            for (i in 1L:n.random) {
                if (debug) {
                    print(paste("read random ", i, " of ", n.random))
                }
                ## read the summary
                file <- paste(results.dir, .Platform$file.sep, random[i], sep = "")
                dir.random <- dir(file)
                if (length(dir.random) > 5L) {
                    dd <- matrix(inla.read.binary.file(file = paste(file, .Platform$file.sep, "summary.dat", sep = "")), ncol = 3L, byrow = TRUE)
                    col.nam <- c("ID", "mean", "sd")
                    ## read quantiles if existing
                    if (debug) {
                        cat("...quantiles.dat if any\n")
                    }
                    if (length(grep("^quantiles.dat$", dir.random)) == 1L) {
                        xx <- inla.interpret.vector(inla.read.binary.file(paste(file, .Platform$file.sep, "quantiles.dat", sep = "")),
                            debug = debug
                        )
                        len <- dim(xx)[2L]
                        qq <- xx[, seq(2L, len, by = 2L), drop = FALSE]
                        col.nam <- c(col.nam, paste(as.character(xx[, 1L]), "quant", sep = ""))
                        dd <- cbind(dd, t(qq))
                    }
                    if (length(grep("^mode.dat$", dir.random)) == 1L) {
                        xx <- inla.interpret.vector(inla.read.binary.file(paste(file, .Platform$file.sep, "mode.dat", sep = "")),
                            debug = debug
                        )
                        len <- dim(xx)[2L]
                        qq <- xx[, seq(2L, len, by = 2L)]
                        col.nam <- c(col.nam, "mode")
                        dd <- cbind(dd, qq)
                    }
                    ## read cdf if existing
                    if (debug) {
                        cat("...cdf.dat if any\n")
                    }
                    if (length(grep("^cdf.dat$", dir.random)) == 1L) {
                        xx <- inla.interpret.vector(inla.read.binary.file(paste(file, .Platform$file.sep, "cdf.dat", sep = "")),
                            debug = debug
                        )
                        len <- dim(xx)[2L]
                        qq <- xx[, seq(2L, len, by = 2L), drop = FALSE]
                        col.nam <- c(col.nam, paste(as.character(xx[, 1L]), " cdf", sep = ""))
                        dd <- cbind(dd, t(qq))
                    }
                    ## read kld
                    if (debug) {
                        cat("...kld\n")
                    }
                    kld1 <- matrix(inla.read.binary.file(file = paste(file, .Platform$file.sep, "symmetric-kld.dat", sep = "")),
                        ncol = 2L, byrow = TRUE
                    )
                    qq <- kld1[, 2L, drop = FALSE]
                    dd <- cbind(dd, qq)
                    if (debug) {
                        cat("...kld done\n")
                    }
                    col.nam <- c(col.nam, "kld")
                    colnames(dd) <- col.nam
                    summary.random[[i]] <- as.data.frame(dd)
                    if (return.marginals.random) {
                        xx <- inla.read.binary.file(paste(file, .Platform$file.sep, "marginal-densities.dat", sep = ""))
                        rr <- inla.interpret.vector.list(xx, debug = debug)
                        rm(xx)
                        if (!is.null(rr)) {
                            nd <- length(rr)
                            names(rr) <- paste("index.", as.character(1L:nd), sep = "")
                            names.rr <- names(rr)
                            for (j in 1L:nd) {
                                colnames(rr[[j]]) <- c("x", "y")
                                if (inla.internal.experimental.mode) {
                                    class(rr[[j]]) <- "inla.marginal"
                                    attr(rr[[j]], "inla.tag") <- paste("marginal random", names.random[i], names.rr[j])
                                }
                            }
                        }
                        if (inla.internal.experimental.mode) {
                            class(rr) <- "inla.marginals"
                            attr(rr, "inla.tag") <- paste("marginals random", names.random[i])
                        }
                        marginals.random[[i]] <- if (is.null(rr)) NA else rr
                    } else {
                        stopifnot(is.null(marginals.random))
                    }
                    ## if id.names are present, override the default names.
                    ## (bugfix: use 'identical(..., NA)' and not
                    ## '!is.na(...)': the element may be a list, for
                    ## which 'is.na()' returns a vector and '&&' errors
                    ## for conditions of length > 1 since R 4.3.)
                    id.names <- inla.readLines(paste(file, .Platform$file.sep, "id-names.dat", sep = ""))
                    if (!is.null(id.names)) {
                        len.id.names <- length(id.names)
                        summary.random[[i]]$ID[1L:len.id.names] <- id.names
                        if (length(marginals.random) >= i && !identical(marginals.random[[i]], NA)) {
                            names(marginals.random[[i]][1L:len.id.names]) <- id.names
                        }
                    }
                } else {
                    ## no results; build an all-NA summary of length N
                    N.file <- paste(file, .Platform$file.sep, "N", sep = "")
                    if (!file.exists(N.file)) {
                        N <- 0L
                    } else {
                        N <- scan(file = N.file, what = numeric(0L), quiet = TRUE)
                    }
                    summary.random[[i]] <- data.frame("mean" = rep(NA, N), "sd" = rep(NA, N), "kld" = rep(NA, N))
                    marginals.random <- NULL
                }
                size.random[[i]] <- inla.collect.size(file)
            }
            names(summary.random) <- names.random
            ## could be that marginals.random is a list of lists of NULL or NA.
            ## (bugfix: test with 'identical(x, NA)'; 'is.na(x)' on a
            ## list element gives a length > 1 condition inside '||',
            ## an error since R 4.3.)
            if (!is.null(marginals.random)) {
                if (all(sapply(marginals.random, function(x) (is.null(x) || identical(x, NA))))) {
                    marginals.random <- NULL
                }
            }
            if (!is.null(marginals.random) && (length(marginals.random) > 0L)) {
                names(marginals.random) <- names.random
            }
        } else {
            if (debug) {
                cat("No random effets\n")
            }
            model.random <- NULL
            summary.random <- NULL
            marginals.random <- NULL
            size.random <- NULL
        }
        res <- list(
            model.random = model.random,
            summary.random = lapply(summary.random, as.data.frame),
            marginals.random = marginals.random,
            size.random = size.random
        )
        return(res)
    }
`inla.collect.spde2.blc` <-
    function(results.dir,
             return.marginals.random,
             debug = FALSE) {
        ## a copy from collect.random, for the 'spde2.blc*'
        ## sub-directories (linear combinations of spde2 fields)
        alldir <- dir(results.dir)
        random <- alldir[grep("^spde2.blc", alldir)]
        n.random <- length(random)
        if (debug) {
            print("collect random effects")
        }
        ## read the names and model of the random effects
        if (n.random > 0L) {
            names.random <- character(n.random)
            model.random <- inla.trim(character(n.random))
            for (i in 1L:n.random) {
                tag <- paste(results.dir, .Platform$file.sep, random[i], .Platform$file.sep, "TAG", sep = "")
                if (!file.exists(tag)) {
                    names.random[i] <- "missing NAME"
                } else {
                    names.random[i] <- readLines(tag, n = 1L)
                }
                modelname <- inla.trim(paste(results.dir, .Platform$file.sep, random[i], .Platform$file.sep, "MODEL", sep = ""))
                if (!file.exists(modelname)) {
                    model.random[i] <- "NoModelName"
                } else {
                    model.random[i] <- inla.trim(readLines(modelname, n = 1L))
                }
            }
            summary.random <- list()
            summary.random[[n.random]] <- NA
            size.random <- list()
            size.random[[n.random]] <- NA
            if (return.marginals.random) {
                marginals.random <- list()
                marginals.random[[n.random]] <- NA
            } else {
                marginals.random <- NULL
            }
            for (i in 1L:n.random) {
                if (debug) {
                    print(paste("read random ", i, " of ", n.random))
                }
                ## read the summary
                file <- paste(results.dir, .Platform$file.sep, random[i], sep = "")
                dir.random <- dir(file)
                if (length(dir.random) > 4L) {
                    dd <- matrix(inla.read.binary.file(file = paste(file, .Platform$file.sep, "summary.dat", sep = "")), ncol = 3L, byrow = TRUE)
                    col.nam <- c("ID", "mean", "sd")
                    ## read quantiles if existing
                    if (debug) {
                        cat("...quantiles.dat if any\n")
                    }
                    if (length(grep("^quantiles.dat$", dir.random)) == 1L) {
                        xx <- inla.interpret.vector(inla.read.binary.file(paste(file, .Platform$file.sep, "quantiles.dat", sep = "")),
                            debug = debug
                        )
                        len <- dim(xx)[2L]
                        qq <- xx[, seq(2L, len, by = 2L), drop = FALSE]
                        col.nam <- c(col.nam, paste(as.character(xx[, 1L]), "quant", sep = ""))
                        dd <- cbind(dd, t(qq))
                    }
                    if (length(grep("^mode.dat$", dir.random)) == 1L) {
                        xx <- inla.interpret.vector(inla.read.binary.file(paste(file, .Platform$file.sep, "mode.dat", sep = "")),
                            debug = debug
                        )
                        len <- dim(xx)[2L]
                        qq <- xx[, seq(2L, len, by = 2L), drop = FALSE]
                        col.nam <- c(col.nam, "mode")
                        dd <- cbind(dd, t(qq))
                    }
                    ## read cdf if existing
                    if (debug) {
                        cat("...cdf.dat if any\n")
                    }
                    if (length(grep("^cdf.dat$", dir.random)) == 1L) {
                        xx <- inla.interpret.vector(inla.read.binary.file(paste(file, .Platform$file.sep, "cdf.dat", sep = "")),
                            debug = debug
                        )
                        len <- dim(xx)[2L]
                        qq <- xx[, seq(2L, len, by = 2L), drop = FALSE]
                        col.nam <- c(col.nam, paste(as.character(xx[, 1L]), " cdf", sep = ""))
                        dd <- cbind(dd, t(qq))
                    }
                    ## read kld
                    if (debug) {
                        cat("...kld\n")
                    }
                    kld1 <- matrix(inla.read.binary.file(file = paste(file, .Platform$file.sep, "symmetric-kld.dat", sep = "")),
                        ncol = 2L, byrow = TRUE
                    )
                    qq <- kld1[, 2L, drop = FALSE]
                    dd <- cbind(dd, qq)
                    if (debug) {
                        cat("...kld done\n")
                    }
                    col.nam <- c(col.nam, "kld")
                    colnames(dd) <- col.nam
                    summary.random[[i]] <- as.data.frame(dd)
                    if (return.marginals.random) {
                        xx <- inla.read.binary.file(paste(file, .Platform$file.sep, "marginal-densities.dat", sep = ""))
                        rr <- inla.interpret.vector.list(xx, debug = debug)
                        rm(xx)
                        if (!is.null(rr)) {
                            nd <- length(rr)
                            names(rr) <- paste("index.", as.character(1L:nd), sep = "")
                            names.rr <- names(rr)
                            for (j in 1L:nd) {
                                colnames(rr[[j]]) <- c("x", "y")
                                if (inla.internal.experimental.mode) {
                                    class(rr[[j]]) <- "inla.marginal"
                                    attr(rr[[j]], "inla.tag") <- paste("marginal random", names.random[i], names.rr[j])
                                }
                            }
                        }
                        if (inla.internal.experimental.mode) {
                            class(rr) <- "inla.marginals"
                            attr(rr, "inla.tag") <- paste("marginals random", names.random[i])
                        }
                        ## bugfix: guard against a NULL 'rr', as
                        ## inla.collect.random does; assigning NULL to a
                        ## list element would *delete* it and the later
                        ## names() assignment would then fail.
                        marginals.random[[i]] <- if (is.null(rr)) NA else rr
                    } else {
                        stopifnot(is.null(marginals.random))
                    }
                } else {
                    N.file <- paste(file, .Platform$file.sep, "N", sep = "")
                    if (!file.exists(N.file)) {
                        N <- 0L
                    } else {
                        N <- scan(file = N.file, what = numeric(0L), quiet = TRUE)
                    }
                    summary.random[[i]] <- data.frame("mean" = rep(NA, N), "sd" = rep(NA, N), "kld" = rep(NA, N))
                    marginals.random <- NULL
                }
                size.random[[i]] <- inla.collect.size(file)
            }
            names(summary.random) <- names.random
            ## could be that marginals.random is a list of lists of NULL or NA.
            ## (bugfix: test with 'identical(x, NA)'; 'is.na(x)' on a
            ## list element gives a length > 1 condition inside '||',
            ## an error since R 4.3.)
            if (!is.null(marginals.random)) {
                if (all(sapply(marginals.random, function(x) (is.null(x) || identical(x, NA))))) {
                    marginals.random <- NULL
                }
            }
            if (!is.null(marginals.random) && (length(marginals.random) > 0L)) {
                names(marginals.random) <- names.random
            }
        } else {
            if (debug) {
                cat("No random effets\n")
            }
            model.random <- NULL
            summary.random <- NULL
            marginals.random <- NULL
            size.random <- NULL
        }
        res <- list(
            model.spde2.blc = model.random,
            summary.spde2.blc = lapply(summary.random, as.data.frame),
            marginals.spde2.blc = marginals.random,
            size.spde2.blc = size.random
        )
        return(res)
    }
`inla.collect.spde3.blc` <-
    function(results.dir,
             return.marginals.random,
             debug = FALSE) {
        ## a copy from collect.random, for the 'spde3.blc*'
        ## sub-directories (linear combinations of spde3 fields)
        alldir <- dir(results.dir)
        random <- alldir[grep("^spde3.blc", alldir)]
        n.random <- length(random)
        if (debug) {
            print("collect random effects")
        }
        ## read the names and model of the random effects
        if (n.random > 0L) {
            names.random <- character(n.random)
            model.random <- inla.trim(character(n.random))
            for (i in 1L:n.random) {
                tag <- paste(results.dir, .Platform$file.sep, random[i], .Platform$file.sep, "TAG", sep = "")
                if (!file.exists(tag)) {
                    names.random[i] <- "missing NAME"
                } else {
                    names.random[i] <- readLines(tag, n = 1L)
                }
                modelname <- inla.trim(paste(results.dir, .Platform$file.sep, random[i], .Platform$file.sep, "MODEL", sep = ""))
                if (!file.exists(modelname)) {
                    model.random[i] <- "NoModelName"
                } else {
                    model.random[i] <- inla.trim(readLines(modelname, n = 1L))
                }
            }
            summary.random <- list()
            summary.random[[n.random]] <- NA
            size.random <- list()
            size.random[[n.random]] <- NA
            if (return.marginals.random) {
                marginals.random <- list()
                marginals.random[[n.random]] <- NA
            } else {
                marginals.random <- NULL
            }
            for (i in 1L:n.random) {
                if (debug) {
                    print(paste("read random ", i, " of ", n.random))
                }
                ## read the summary
                file <- paste(results.dir, .Platform$file.sep, random[i], sep = "")
                dir.random <- dir(file)
                if (length(dir.random) > 4L) {
                    dd <- matrix(inla.read.binary.file(file = paste(file, .Platform$file.sep, "summary.dat", sep = "")), ncol = 3L, byrow = TRUE)
                    col.nam <- c("ID", "mean", "sd")
                    ## read quantiles if existing
                    if (debug) {
                        cat("...quantiles.dat if any\n")
                    }
                    if (length(grep("^quantiles.dat$", dir.random)) == 1L) {
                        xx <- inla.interpret.vector(inla.read.binary.file(paste(file, .Platform$file.sep, "quantiles.dat", sep = "")),
                            debug = debug
                        )
                        len <- dim(xx)[2L]
                        qq <- xx[, seq(2L, len, by = 2L), drop = FALSE]
                        col.nam <- c(col.nam, paste(as.character(xx[, 1L]), "quant", sep = ""))
                        dd <- cbind(dd, t(qq))
                    }
                    if (length(grep("^mode.dat$", dir.random)) == 1L) {
                        xx <- inla.interpret.vector(inla.read.binary.file(paste(file, .Platform$file.sep, "mode.dat", sep = "")),
                            debug = debug
                        )
                        len <- dim(xx)[2L]
                        qq <- xx[, seq(2L, len, by = 2L), drop = FALSE]
                        col.nam <- c(col.nam, "mode")
                        dd <- cbind(dd, t(qq))
                    }
                    ## read cdf if existing
                    if (debug) {
                        cat("...cdf.dat if any\n")
                    }
                    if (length(grep("^cdf.dat$", dir.random)) == 1L) {
                        xx <- inla.interpret.vector(inla.read.binary.file(paste(file, .Platform$file.sep, "cdf.dat", sep = "")),
                            debug = debug
                        )
                        len <- dim(xx)[2L]
                        qq <- xx[, seq(2L, len, by = 2L), drop = FALSE]
                        col.nam <- c(col.nam, paste(as.character(xx[, 1L]), " cdf", sep = ""))
                        dd <- cbind(dd, t(qq))
                    }
                    ## read kld
                    if (debug) {
                        cat("...kld\n")
                    }
                    kld1 <- matrix(inla.read.binary.file(file = paste(file, .Platform$file.sep, "symmetric-kld.dat", sep = "")),
                        ncol = 2L, byrow = TRUE
                    )
                    qq <- kld1[, 2L, drop = FALSE]
                    dd <- cbind(dd, qq)
                    if (debug) {
                        cat("...kld done\n")
                    }
                    col.nam <- c(col.nam, "kld")
                    colnames(dd) <- col.nam
                    summary.random[[i]] <- as.data.frame(dd)
                    if (return.marginals.random) {
                        xx <- inla.read.binary.file(paste(file, .Platform$file.sep, "marginal-densities.dat", sep = ""))
                        rr <- inla.interpret.vector.list(xx, debug = debug)
                        rm(xx)
                        if (!is.null(rr)) {
                            nd <- length(rr)
                            names(rr) <- paste("index.", as.character(1L:nd), sep = "")
                            names.rr <- names(rr)
                            for (j in 1L:nd) {
                                colnames(rr[[j]]) <- c("x", "y")
                                if (inla.internal.experimental.mode) {
                                    class(rr[[j]]) <- "inla.marginal"
                                    attr(rr[[j]], "inla.tag") <- paste("marginal random", names.random[i], names.rr[j])
                                }
                            }
                        }
                        if (inla.internal.experimental.mode) {
                            class(rr) <- "inla.marginals"
                            attr(rr, "inla.tag") <- paste("marginals random", names.random[i])
                        }
                        ## bugfix: guard against a NULL 'rr', as
                        ## inla.collect.random does; assigning NULL to a
                        ## list element would *delete* it and the later
                        ## names() assignment would then fail.
                        marginals.random[[i]] <- if (is.null(rr)) NA else rr
                    } else {
                        stopifnot(is.null(marginals.random))
                    }
                } else {
                    N.file <- paste(file, .Platform$file.sep, "N", sep = "")
                    if (!file.exists(N.file)) {
                        N <- 0L
                    } else {
                        N <- scan(file = N.file, what = numeric(0L), quiet = TRUE)
                    }
                    summary.random[[i]] <- data.frame("mean" = rep(NA, N), "sd" = rep(NA, N), "kld" = rep(NA, N))
                    marginals.random <- NULL
                }
                size.random[[i]] <- inla.collect.size(file)
            }
            names(summary.random) <- names.random
            ## could be that marginals.random is a list of lists of NULL or NA.
            ## (bugfix: test with 'identical(x, NA)'; 'is.na(x)' on a
            ## list element gives a length > 1 condition inside '||',
            ## an error since R 4.3.)
            if (!is.null(marginals.random)) {
                if (all(sapply(marginals.random, function(x) (is.null(x) || identical(x, NA))))) {
                    marginals.random <- NULL
                }
            }
            if (!is.null(marginals.random) && (length(marginals.random) > 0L)) {
                names(marginals.random) <- names.random
            }
        } else {
            if (debug) {
                cat("No random effets\n")
            }
            model.random <- NULL
            summary.random <- NULL
            marginals.random <- NULL
            size.random <- NULL
        }
        res <- list(
            model.spde3.blc = model.random,
            summary.spde3.blc = lapply(summary.random, as.data.frame),
            marginals.spde3.blc = marginals.random,
            size.spde3.blc = size.random
        )
        return(res)
    }
`inla.image.reduce` <- function(im, image.dim = 512) {
    ## reduce image IM to image.dim IMAGE.DIM and return the image as a matrix.
    ## order the indices so the output can be plotted by image()
    ##
    ## any object that is not a square 'pixmapGrey' image is returned
    ## unchanged; otherwise the grey-level matrix is returned.
    ## (bugfix: use 'inherits()' instead of 'class(im) != "pixmapGrey"';
    ## 'class()' can have length > 1, e.g. c("matrix", "array"), which
    ## makes the old comparison an error inside '||' since R 4.3.)
    if (!inherits(im, "pixmapGrey") || (im@size[1L] != im@size[2L])) {
        return(im)
    } else {
        return(im@grey)
    }
    ## do not need this anymore as we do this in GMRFLib.
    ## (kept for reference; unreachable by construction.)
    if (FALSE) {
        if (image.dim >= im@size[1L]) {
            n <- as.integer(im@size[1L])
            x <- matrix(NA, n, n)
            for (j in 1L:n) {
                x[j, n - (1L:n) + 1L] <- im@grey[1L:n, j]
            }
            return(x)
        }
        block <- ceiling(im@size[1L] / image.dim)
        n <- floor(im@size[1L] / block)
        ii <- jj <- 0L
        x <- matrix(NA, n, n)
        for (i in seq(1L, im@size[1L] - block + 1L, by = block)) {
            ii <- ii + 1L
            jj <- 0L
            for (j in seq(1L, im@size[1L] - block + 1L, by = block)) {
                jj <- jj + 1L
                x[jj, n - ii + 1L] <- min(im@grey[i:(i + block - 1L), j:(j + block - 1L)])
            }
        }
        return(x)
    }
}
`inla.collect.offset.linear.predictor` <- function(results.dir, debug = FALSE) {
    ## Read the total offset of the linear predictor written by the
    ## inla-program.  The file is always written, hence its absence is
    ## treated as an error.
    fnm <- file.path(results.dir, "totaloffset", "totaloffset.dat")
    stopifnot(file.exists(fnm))
    return(list(offset.linear.predictor = inla.read.binary.file(fnm)))
}
|
7add26d28ac249d4d1c400e56487ef6f4dd3f237 | 04b5f50c5ba46c41ccc49a4474aec6d17061c2e8 | /cluster final cut.R | 8c2b641537e543f84470a9e98d07c3c4c4fd6486 | [] | no_license | rahav08/House-sales-prediction-final | 182481828d1146c734305589ce868a9c8f27a9b3 | b330bac725e4a74a88c89f3a89acc0d1acff5eb8 | refs/heads/master | 2022-04-20T09:44:08.158249 | 2020-04-19T06:33:14 | 2020-04-19T06:33:14 | 256,930,858 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,221 | r | cluster final cut.R | House <- read.csv("Downloads/House_Price_data (1).csv",header=TRUE,sep=",")
# Goal: restrict the house-price data to well-equipped homes, k-means
# cluster them on year built + living area, and plot clusters vs sale price.
# Requires `House` (read above from House_Price_data (1).csv).
Utilities <- House$Utilities
Heating <- House$Heating
HeatingQC <- House$HeatingQC
GrLivArea <- (House$GrLivArea)
Fullbath <- House$FullBath
Halfbath <- House$HalfBath
Bedroomabvgr <- House$BedroomAbvGr
Kitchenabvgr <- House$KitchenAbvGr
KitchenQual<- House$KitchenQual
GarageType <- House$GarageType
FireplaceQu <- House$FireplaceQu
CentralAir <- House$CentralAir
head(Utilities)
BldgType <- House$BldgType
levels(BldgType)
# 0/1 indicators marking acceptable quality levels of each amenity.
dummy_heating <- as.numeric(HeatingQC == 'Ex'|HeatingQC == 'Fa'|HeatingQC == 'Gd')
# BUG FIX: the first disjunct previously tested HeatingQC == 'Ex' (copy-paste
# from the line above), so 'Ex' kitchens were scored by heating quality.
dummy_KitchenQual<- as.numeric(KitchenQual == 'Ex'|KitchenQual == 'Fa'| KitchenQual == 'Gd'|
                                 KitchenQual=='TA')
dummy_fire <- as.numeric(FireplaceQu == 'Ex'|FireplaceQu == 'Fa'|FireplaceQu == 'Gd')
dummy_air<- as.numeric(CentralAir=='Y')
dummy_uti<- as.numeric(Utilities=='AllPub')
dummy_gar<-as.numeric(GarageType=='Attchd'|GarageType=='Detchd')
library(dplyr)
Quality <- House$OverallQual
Year<- House$YearBuilt
Hs <- House$HouseStyle
Hs<- as.factor(Hs)
final_data <- data.frame(Year,GrLivArea,BldgType,House$SalePrice,Hs,Quality)
# Keep rows passing all amenity indicators and minimum-size thresholds;
# the dummy_* and *bath vectors are parallel to final_data's rows.
df1 <- (filter(final_data,dummy_uti==1,dummy_air==1,dummy_fire==1,
               dummy_KitchenQual==1,dummy_heating==1,
               Fullbath>=1,Halfbath>=1,GrLivArea>=1000,
               Bedroomabvgr>=1,Kitchenabvgr>=1))
head(df1)
tail(df1)
df1<-na.omit(df1)
print(is.factor(df1$BldgType))
# Encode categorical columns numerically so kmeans/plotting can use them.
df1$Hs<-as.factor(df1$Hs)
df1$Hs<- as.numeric(df1$Hs)
df1$BldgType<- as.numeric(df1$BldgType)
n_clusters <- 5
# Cluster on the first two columns (Year, GrLivArea) with 30 random restarts.
clusters <- kmeans(df1[,(1:2)], n_clusters, nstart = 30)
clusters <- factor(clusters$cluster)
print(table(df1$BldgType, clusters))
library(ggplot2)
library(plotly)
Hs <- factor(Hs)
# Scatter of living area vs sale price, coloured by cluster assignment.
g <- ggplot(data = df1,
            aes(x = GrLivArea,
                y = House.SalePrice,
                colour = clusters))+
  geom_point(data = df1,
             aes(size=1))+theme_classic()+
  theme(legend.background = element_rect(fill="lightyellow",
                                         size=0.5, linetype="solid"))+
  ggtitle("Clustering Between Living Area and Sale Price") +
  xlab("Living Area (sqft)") + ylab("Sale Price")
ggplotly(g)
|
07a5569cb548a265a97b06d634a0fb73e47c044a | 94036c48173e2eeff4dfecd6bb1b515026363bdf | /feeds.R | db2abacde6b30101a6f74f5e4fe36f1fc63aa289 | [] | no_license | wilsonfreitas/brbloggers-backend | c9b75d577d4e6da8e3f20c779e61a693b290cedb | 6d22293b92d9e3e23c2d63a0da605f3813dfed06 | refs/heads/master | 2021-04-12T12:07:48.826661 | 2017-06-16T13:26:48 | 2017-06-16T13:26:48 | 94,549,116 | 0 | 1 | null | 2017-06-16T13:58:14 | 2017-06-16T13:58:14 | null | UTF-8 | R | false | false | 769 | r | feeds.R | feeds <- list(
"curso-r" = list(
nome = "Curso-R",
url = "http://curso-r.com/blog/index.xml"
),
"paixao-por-dados" = list(
nome = "Paixão por Dados",
url = "https://sillasgonzaga.github.io/feed.xml"
),
"analise-real" = list(
nome = "Análise Real",
url = "https://analisereal.com/feed/"
),
"dfalbel" = list(
nome = "Como faz no R",
url = "http://dfalbel.github.io/feed.xml"
),
"lurodrigo" = list(
nome = "Luiz Rodrigo",
url = "http://lurodrigo.com/feed.R_pt.xml"
),
"cantinho-do-r" = list(
nome = "Cantinho do R",
url = "https://cantinhodor.wordpress.com/feed/"
),
"IBPAD" = list(
nome = "IBPAD",
url = "http://www.ibpad.com.br/blog/analise-de-dados/feed/"
)
)
|
f175aed5fbbe3bdb0051db91b47eac1d5f070dec | 8b2b0f04ee5d217a23ca14772dc0a7600445b364 | /marked/man/crm.wrapper.Rd | da37b254ffa64060087236daa408c8cec909752f | [] | no_license | pconn/marked | 15031bf4830b4fa4769596fc96381bd08828e73a | 5e1c6ee88abbf75cb510ac7af797867d7ffceee5 | refs/heads/master | 2021-01-23T03:34:44.341589 | 2016-12-05T23:19:11 | 2016-12-05T23:19:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,830 | rd | crm.wrapper.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crm.wrapper.R
\name{crm.wrapper}
\alias{create.model.list}
\alias{crm.wrapper}
\alias{crmlist_fromfiles}
\alias{load.model}
\alias{model.table}
\alias{rerun_crm}
\title{Automation of model runs}
\usage{
crm.wrapper(model.list,data,ddl=NULL,models=NULL,base="",
external=TRUE,run=TRUE,env=NULL,...)
create.model.list(parameters)
model.table(model.list)
load.model(x)
crmlist_fromfiles(filenames=NULL,external=TRUE)
rerun_crm(data,ddl,model.list,method=NULL,modelnums=NULL,initial=NULL,...)
}
\arguments{
\item{model.list}{matrix of model names contained in the environment of models function; each row is a model and each column is for a parameter and the value is formula name}
\item{data}{Either the raw data which is a dataframe with at least one
column named ch (a character field containing the capture history) or a
processed dataframe. For rerun_crm this should be the processed dataframe}
\item{ddl}{Design data list which contains a list element for each parameter
type; if NULL it is created; for rerun_crm, it must be the same ddl as used with the original run and cannot be NULL}
\item{models}{a function with a defined environment with model specifications as variables; values of model.list are some or all of those variables}
\item{base}{base value for model names}
\item{external}{if TRUE, model results are stored externally; otherwise they are stored in crmlist}
\item{run}{if TRUE, fit models; otherwise just create dml to test if model data are correct for formula}
\item{env}{environment to find model specifications if not parent.frame}
\item{...}{additional arguments passed to crm; for rerun_crm can be used to set hessian=TRUE for specific models after they have been run}
\item{parameters}{character vector of parameter names}
\item{x}{filename of externally stored model}
\item{method}{vector of methods to use for optimization if different than the previous run in rerun_crm}
\item{modelnums}{model numbers to be re-run instead of those that did not converge}
\item{initial}{either a fitted crm model or the model number in model.list to use for starting values}
\item{filenames}{for non-Windows machines, vector of filenames for external files must be specified in crmlist_fromfiles including .rda extension}
}
\value{
create.model.list returns a matrix for crm.wrapper; crm.wrapper runs and stores models externally and returns a list of model results
and a model selection table; load.model returns model object that is stored externally
}
\description{
Some functions that help automate running a set of crm models based on parameter
specifications.
}
\details{
create.model.list creates all combinations of model specifications for the specified
set of parameters. In the calling environment it looks for objects named parameter.xxxxxx where xxxxxx can
be anything. It creates a matrix with a column for each parameter and as many rows
needed to create all combinations. This can be used as input to crm.wrapper.
crm.wrapper runs a sequence of crm models by constructing the call with the arguments
and the parameter specifications. The parameter specifications can either be in the
local environment or in the environment of the named function models. The advantage of the
latter is that it is self-contained such that sets of parameter specifications can
be selected without possibility of being over-written or accidentally changed whereas
with the former the set must be identified via a script and any in the environment will
be used which requires removing/recreating the set to be used.
}
\author{
Jeff Laake
}
\seealso{
\code{\link{crm}}
}
\keyword{models}
|
08c381d6f8204c5993d9ce4d336d1e980b77a0f5 | 160f6bc921ab575e0e4e541a36ec37e3aabad277 | /R/panel.2dsmoother.R | 7ec6768f60341a1ac9d509bd3ad2a87576f14e6b | [] | no_license | cran/latticeExtra | e7a575e32d22d36a3e068098b6073e47ea57e8d4 | 6f308186985f6bf43f4376cb54622f73adfeab35 | refs/heads/master | 2022-07-14T15:20:20.166713 | 2022-07-04T15:50:02 | 2022-07-04T15:50:02 | 17,696,992 | 0 | 4 | null | null | null | null | UTF-8 | R | false | false | 1,568 | r | panel.2dsmoother.R |
panel.2dsmoother <-
function(x, y, z, subscripts = TRUE,
form = z ~ x * y, method = "loess", ...,
args = list(), n = 100)
{
if (length(subscripts) == 0)
return()
## allow 'form' to be passed as the first argument
missing.x <- missing(x)
if (!missing.x && inherits(x, "formula")) {
form <- x
missing.x <- TRUE
}
## use 'x', 'y', 'z' if given
## otherwise try to find them in the formula environment
if (missing.x)
x <- environment(form)$x
if (missing(y))
y <- environment(form)$y
if (missing(z))
z <- environment(form)$z
x <- x[subscripts]
y <- y[subscripts]
z <- z[subscripts]
ok <- is.finite(x) & is.finite(y) & is.finite(z)
if (sum(ok) < 1)
return()
x <- as.numeric(x)[ok]
y <- as.numeric(y)[ok]
z <- as.numeric(z)[ok]
mod <- do.call(method,
c(alist(form, data = list(x = x, y = y, z = z)),
args))
## use the limits of the data, or panel limits, whichever is smaller
lims <- current.panel.limits()
xrange <- c(max(min(lims$x), min(x)), min(max(lims$x), max(x)))
yrange <- c(max(min(lims$y), min(y)), min(max(lims$y), max(y)))
xseq <- seq(xrange[1], xrange[2], length = n)
yseq <- seq(yrange[1], yrange[2], length = n)
## zseq <- seq(min(z), max(z), length = n)
grid <- expand.grid(x = xseq, y = yseq)
fit <- predict(mod, grid)
panel.levelplot(x = grid$x, y = grid$y, z = fit, subscripts = TRUE,
...)
}
|
e8a25d1a3aa96573a4196625209e67cfa4cc43ab | 3063a9e4333d2adba42c924094117cd53189a2c9 | /man/epidemic_age_dist.Rd | 72eb8dcfe85f57d0a6b9b9aface90c6cf6810f45 | [
"MIT"
] | permissive | sbfnk/epimixr | 33ce12f12ee477207c965c664e18a3469f826904 | f62f81d290b18724579686704d91adeb2de9a0a7 | refs/heads/main | 2023-08-04T09:10:28.505132 | 2023-07-26T14:11:26 | 2023-07-26T14:11:26 | 141,565,063 | 3 | 0 | MIT | 2023-07-26T14:11:27 | 2018-07-19T10:39:51 | R | UTF-8 | R | false | true | 1,251 | rd | epidemic_age_dist.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/epidemic_age_dist.r
\name{epidemic_age_dist}
\alias{epidemic_age_dist}
\title{Calculates the age distribution of an epidemic}
\usage{
epidemic_age_dist(
mixing_matrix,
r_0,
immunity = 0,
final_size_start = 0.01,
tol = 1e-05
)
}
\arguments{
\item{mixing_matrix}{A mixing matrix or set of mixing matrices, as returned
by \code{socialmixr::contact_matrix}}
\item{r_0}{basic reproduction number}
\item{immunity}{proportion immune before the epidemic}
\item{final_size_start}{starting value for inidence}
\item{tol}{tolerance for stopping the iteration}
}
\value{
A matrix of the final size(s) (proportion of susceptibles infected)
in each age group (one row per matrix contained in \code{mixing})
}
\description{
calculates the age distribution in an epidemic setting using the iterative
method of: J Wallinga, P Teunis, M Kretschmar (2006) Using Data on Social
Contacts to Estimate Age-specific Transmission Parameters for
Respiratory-spread Infectious Agents. Am J Epidemiol 164(10), 945-946.
}
\examples{
library("socialmixr")
mixing <- contact_matrix(survey = polymod, age.limits = c(0, 5, 10))
epidemic_age_dist(mixing$matrix, r_0 = 5, immunity = 0.50)
}
|
b0d0cbeb3fcf1cd8d82ca460466e99cd900b5844 | 393e89dc6404c237e40d0685ebeca2738d3c2331 | /scripts/radius_by_hadley.R | 996d8a15a7975938b34e0401de8cbadd821c3d38 | [
"MIT"
] | permissive | kornellabun/kornellabun.github.io | 40165d6bde2d6e10220649739ed49785260a4b35 | 3856e6f5f89151322646cb1f8bb3c88885fdcc42 | refs/heads/master | 2022-09-13T04:25:57.342011 | 2022-08-27T10:59:57 | 2022-08-27T10:59:57 | 174,212,668 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,113 | r | radius_by_hadley.R | library("ggplot2")
# Toy "character stats" data: one row per time step, one column per trait.
me <- data.frame(Time = c(0, 1, 2, 3, 4, 5, 6, 7),
                 Forcefull = c(2, 0, 1, 1, 2, 3, 4, 5),
                 Sneaky = c(3, 0, 1, 1, 2, 3, 4, 5),
                 Carefull = c(2, 0, 1, 1, 2, 3, 4, 5),
                 Flashy = c(1, 0, 1, 1, 2, 3, 4, 5),
                 Clever = c(0, 0, 1, 1, 2, 3, 4, 5),
                 Quick = c(1, 0, 1, 1, 2, 3, 4, 5))
# Keep only the first row (Time == 0) and melt to long (variable, value) form.
me <- reshape2::melt(me[1, ], id.vars = "Time")
me$value <- me$value/ 5 # rescale scores to [0, 1] (max score assumed 5 — TODO confirm)
# Radar-chart coordinate system for ggplot2 (adapted from Hadley Wickham's
# workaround): a CoordPolar subclass that reports itself as linear
# (is_linear = TRUE) so polygon edges are drawn as straight segments, and
# overrides render_bg to draw the angular/radial grid itself.
# NOTE(review): uses ggplot2 internals via ':::' — fragile across ggplot2
# versions; confirm against the installed version before reuse.
coord_radar <- function (theta = "x", start = 0, direction = 1)
{
  theta <- match.arg(theta, c("x", "y"))
  # the radial aesthetic is whichever of x/y is not mapped to the angle
  r <- if (theta == "x")
    "y"
  else "x"
  #dirty
  # swap x/y panel info into (theta, r) names expected by the polar code
  rename_data <- function(coord, data) {
    if (coord$theta == "y") {
      plyr::rename(data, c("y" = "theta", "x" = "r"), warn_missing = FALSE)
    } else {
      plyr::rename(data, c("y" = "r", "x" = "theta"), warn_missing = FALSE)
    }
  }
  # map data-space theta breaks to [0, 2*pi), honouring start/direction
  theta_rescale <- function(coord, x, scale_details) {
    rotate <- function(x) (x + coord$start) %% (2 * pi) * coord$direction
    rotate(scales::rescale(x, c(0, 2 * pi), scale_details$theta.range))
  }
  # map radial breaks to [0, 0.4] (native panel units; 0.5 = panel edge)
  r_rescale <- function(coord, x, scale_details) {
    scales::rescale(x, c(0, 0.4), scale_details$r.range)
  }
  ggproto("CordRadar", CoordPolar, theta = theta, r = r, start = start,
          direction = sign(direction),
          is_linear = function(coord) TRUE,
          render_bg = function(self, scale_details, theme) {
            scale_details <- rename_data(self, scale_details)
            theta <- if (length(scale_details$theta.major) > 0)
              theta_rescale(self, scale_details$theta.major, scale_details)
            thetamin <- if (length(scale_details$theta.minor) > 0)
              theta_rescale(self, scale_details$theta.minor, scale_details)
            thetafine <- seq(0, 2 * pi, length.out = 100)
            rfine <- c(r_rescale(self, scale_details$r.major, scale_details))
            # This gets the proper theme element for theta and r grid lines:
            # panel.grid.major.x or .y
            majortheta <- paste("panel.grid.major.", self$theta, sep = "")
            minortheta <- paste("panel.grid.minor.", self$theta, sep = "")
            majorr <- paste("panel.grid.major.", self$r, sep = "")
            # spokes for major/minor theta breaks, concentric circles for r
            ggplot2:::ggname("grill", grid::grobTree(
              ggplot2:::element_render(theme, "panel.background"),
              if (length(theta) > 0) ggplot2:::element_render(
                theme, majortheta, name = "angle",
                x = c(rbind(0, 0.45 * sin(theta))) + 0.5,
                y = c(rbind(0, 0.45 * cos(theta))) + 0.5,
                id.lengths = rep(2, length(theta)),
                default.units = "native"
              ),
              if (length(thetamin) > 0) ggplot2:::element_render(
                theme, minortheta, name = "angle",
                x = c(rbind(0, 0.45 * sin(thetamin))) + 0.5,
                y = c(rbind(0, 0.45 * cos(thetamin))) + 0.5,
                id.lengths = rep(2, length(thetamin)),
                default.units = "native"
              ),
              ggplot2:::element_render(
                theme, majorr, name = "radius",
                x = rep(rfine, each = length(thetafine)) * sin(thetafine) + 0.5,
                y = rep(rfine, each = length(thetafine)) * cos(thetafine) + 0.5,
                id.lengths = rep(length(thetafine), length(rfine)),
                default.units = "native"
              )
            ))
          })
}
# coord_radar <- function (theta = "x", start = 0, direction = 1)
# {
# theta <- match.arg(theta, c("x", "y"))
# r <- if (theta == "x") "y" else "x"
# ggproto("CordRadar", CoordPolar, theta = theta, r = r, start = start,
# direction = sign(direction),
# is_linear = function(coord) TRUE)
# }
# Radar (spider) chart of the trait scores using coord_radar() defined above.
ggplot(me, aes(x = variable, y = value)) +
  geom_polygon(aes(group = Time, color = Time), fill = "black", size = 2, show.legend = FALSE) +
  xlab("") + ylab("") +
  theme(axis.text.x = element_text(vjust = 1)) +
  coord_radar(theta = 'x', start = 0, direction = 1)
26db6a827834ba52238755a739a73b3603f3626f | 2ad30a3a362e0161dbc6f23cb8d5c46d0ec5496d | /docs/coronavirus_.R | 22b5363c9d21dbbad8e495fff773b865a07e065e | [] | no_license | DATAUNIRIO/worldmaps | a97bd2dfe2f463d11878255a93a644724e1621b8 | 349f55a3a0acdf2849f798a34a949dfdee911ca2 | refs/heads/master | 2023-01-29T09:59:53.056458 | 2020-05-04T21:51:58 | 2020-05-04T21:51:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,194 | r | coronavirus_.R | # 2019-nCov distribution
# web scraped data from the european centre for disease control
# up to date link ---------------------------------------------------------
# https://darwinanddavis.github.io/worldmaps/coronavirus.html
# packages ----------------------------------------------------------------
# install.packages("pacman")
require(pacman)
p_load(maps,dplyr,leaflet,xml2,rvest,ggmap,geosphere,htmltools,mapview,purrr,rworldmap,rgeos,stringr,here,htmlwidgets,readxl,httr,readr,stringi)
# set wd
here::set_here("/Users/malishev/Documents/Data/worldmaps/worldmaps/")
# scrape data from web \xml2 ---------------------------------------------------------------
# Data sources: ECDC current table (url), ECDC historical csv (url2),
# Google crisis-response recovery table (url3).
url <- "https://www.ecdc.europa.eu/en/geographical-distribution-2019-ncov-cases" # get today's data
# link: https://www.ecdc.europa.eu/en/publications-data/download-todays-data-geographic-distribution-covid-19-cases-worldwide
url2 <- "https://www.ecdc.europa.eu/sites/default/files/documents/COVID-19-geographic-disbtribution-worldwide.csv" # get historical data as of today
url3 <- "https://google.org/crisisresponse/covid19-map" # recovery data from google
# end user input ----------------------------------------------------------
# . -----------------------------------------------------------------------
# . -----------------------------------------------------------------------
# get geocode \ rgeos rworldmaps ------------------------------------------
# country centroids; rownames of the result are country names, later used
# as lookup keys when matching scraped country names to coordinates
lonlat <- getMap(resolution="low") %>% # get country lonlats from rgeos database
  gCentroid(byid=TRUE) %>%
  as.data.frame
lonlat$Country <- rownames(lonlat) # add country col
colnames(lonlat) <- c("Lon", "Lat","Country") # rename cols
# function for getting lonlat from rgeos database
find_lonlat <- function(country_string){
  # Look up centroid coordinates for countries whose name matches the
  # pattern `country_string` in the global `lonlat` table (Lon, Lat, Country).
  # Prints the matching rows (print() returns them invisibly).
  # FIX: removed an unused intermediate (`country_string_return_name`) that
  # was computed via select("Country") but never used.
  matched_rows <- lonlat %>% filter(Country %in% str_subset(lonlat$Country,country_string))
  print(matched_rows)
}
# function for getting current country name in cv
set_country_name <- function(country_name){
  # Return the entries of cv$Country matching the pattern `country_name`
  # (via stringr::str_which); relies on the global `cv` data frame.
  # Cleanup: dropped a redundant c() wrapper around the single pattern.
  cv[str_which(cv$Country,country_name),"Country"]
}
# save historical data to dir
# download the full ECDC time series to a temp csv (NTLM-authenticated GET)
GET(url2, authenticate(":", ":", type="ntlm"), write_disk(tf <- tempfile(fileext = ".csv")))
cv_historical <- read_csv(tf)
cv_historical %>% head
# write_csv(cv_historical,paste0(here(),"/cv_historical.csv")) # write historical data to file
# convert cv webtable to tibble \rvest
web_data <- url %>% read_html
tb <- web_data %>% html_table(trim = T)
cv <- tb[[1]] # get df
cv[is.na(cv)] <- 0 # rm nas
# get recovery and cases per million data from google
web_data_recovered <- url3 %>% read_html
cv2 <- web_data_recovered %>% html_table(trim = T)
cv2 <- cv2[[2]] # get df
cv2[is.na(cv2)] <- 0
# mod data
cv <- setNames(cv,c("Continent","Country","Cases","Deaths","Cases_last_15_days")) # set names
cv$Deaths <- cv$Deaths %>% stri_replace_all_charclass("\\p{WHITE_SPACE}","") # remove middle white space
cv$Deaths <- cv$Deaths %>% as.integer() # set as int
# get totals
# NOTE(review): totals are taken as the column-wise max, i.e. this assumes
# the scraped table's "Total" row holds the largest values — confirm upstream.
cv_total <- cv %>% summarise(Total_cases = max(Cases,na.rm = T),
                             Total_deaths = max(Deaths,na.rm = T),
                             Total_recent_cases = max(Cases_last_15_days,na.rm = T))
cv <- cv[!cv$Country=="Total",] # rm total from country df
cv <- cv[!cv$Country=="Other",] # remove 'other' country
cv <- cv[!cv$Country=="Asia",] # remove 'other' country
# cv <- cv[!cv$Country==stringr::str_subset(cv$Country,"Place"),] # remove descriptive row header
cv %>% tail
# clean strings
cv$Cases <- cv$Cases %>% str_replace(" ","") %>% as.numeric()
cv$Deaths <- cv$Deaths %>% str_replace(" ","") %>% as.numeric()
cv$Country <- cv$Country %>% str_replace_all("_"," ") %>% as.character()
cv2$Recovered <- cv2$Recovered %>% str_replace_all(",","") %>% as.character()
# fix anomalies in country entries
# cv[cv$Country=="Japan",c("Cases","Deaths")] <- cv[cv$Country=="Japan",c("Cases","Deaths")] %>% as.numeric + cv[cv$Country=="Cases on an international conveyance Japan",c("Cases","Deaths")] %>% as.numeric
cv <- cv[!cv$Country==str_subset(cv$Country,"conveyance Japan"),] # remove japan duplicate
# rename countries for getting centroid later
cv[str_which(cv$Country,"Korea"),"Country"] <- "South Korea"
cv[str_which(cv$Country,"Iran"),"Country"] <- "Iran"
cv[str_which(cv$Country,"Maced"),"Country"] <- "Macedonia"
cv[str_which(cv$Country,"Pales"),"Country"] <- "Palestine* (as neither recognition nor prejudice towards the State)"
cv[str_which(cv$Country,"Ser"),"Country"] <- "Republic of Serbia" # match geocode country string in lonlat /rgeos
cv[str_which(cv$Country,"Holy"),"Country"] <- "Vatican" # match geocode country string in lonlat /rgeos
cv[str_which(cv$Country,"Brun"),"Country"] <- "Brunei" # match geocode country string in lonlat /rgeos
cv[str_which(cv$Country,"Congo"),"Country"][1] <- "Republic of the Congo" # republic of the congo
cv[str_which(cv$Country,"Democratic"),"Country"] <- "Democratic Republic of the Congo" # DRC
cv[str_which(cv$Country,"Eswa"),"Country"] <- "Swaziland" # swaziland
cv[str_which(cv$Country,"Ivo"),"Country"] <- "Ivory Coast" # ivory coast
cv[str_which(cv$Country,"Baha"),"Country"] <- "The Bahamas" # bahamas
cv[str_which(cv$Country,"Nether"),"Country"][1] <- "Netherlands"
# cv[str_which(cv$Country,"Nether"),"Country"][2] <- "Netherlands Antilles"
cv[str_which(cv$Country,"Timor"),"Country"] <- "East Timor"
cv[str_which(cv$Country,"Turks"),"Country"] <- find_lonlat("Turks")$Country
cv[str_which(cv$Country,"Cura"),"Country"] <- find_lonlat("Curac")$Country
# get totals per continent ## not run 24-2-20
# cv_continent_cases <- cv %>% filter(Country=="") %>% select(Cases)
# cv_continent_deaths <- cv %>% filter(Country=="") %>% select(Deaths)
# cv_continent_cases$Continent <- cv[,"Continent"] %>% unique
# cv_continent_deaths$Continent <- cv[,"Continent"] %>% unique
# remove empty country rows
# cv <- cv[!cv$Country=="",]
# rank data
cv <- cv %>% arrange(desc(Cases)) # rank data in descending order to layer map points
# get global case and death rankings
# rank 1 = highest count (dense ranks are flipped around their max)
cv <- cv %>% mutate(Cases_ranked = (Cases %>% dense_rank %>% max + 1) - (Cases %>% dense_rank),
                    Deaths_ranked = (Deaths %>% dense_rank %>% max + 1) - (Deaths %>% dense_rank),
                    Cases_15days_ranked = (Cases_last_15_days %>% dense_rank %>% max + 1) - (Cases_last_15_days %>% dense_rank)
                    )
# subset
# pull plain vectors out of cv/cv2 for use in the leaflet layers below
cv_country <- cv$Country
cv_cases <- cv$Cases %>% as.numeric()
cv_deaths <- cv$Deaths %>% as.numeric()
cv_total_cases <- cv_total$Total_cases
cv_total_deaths <- cv_total$Total_deaths
cv_total_recent_cases <- cv_total$Total_recent_cases
cv_recent_cases <- cv$Cases_last_15_days %>% as.numeric()
cv_cases_ranked <- cv$Cases_ranked %>% as.numeric()
cv_deaths_ranked <- cv$Deaths_ranked %>% as.numeric()
cv_cases_15days_ranked <- cv$Cases_15days_ranked %>% as.numeric()
# recovery data
cv2_country <- cv2$Location
cv2_recovered <- cv2$Recovered %>% as.numeric()
# match cv country lonlat to lonlat rgeos database
# (row-name lookup: unmatched country names produce NA rows, checked below)
lonlat_final <- lonlat[cv_country,]
lonlat_final2 <- lonlat[cv2_country,]
lonlat_final %>% # write to dir
  readr::write_csv("cv_lonlat.csv")
# add lonlat to df
cv[,c("Lon","Lat")] <- lonlat_final[,c("Lon","Lat")]
cv2[,c("Lon","Lat")] <- lonlat_final2[,c("Lon","Lat")] # recovery data
# check country name with latlon
if(any(lonlat_final$Country == cv$Country)!=TRUE){
  cat("\n\n\nCheck country lonlat before plotting\n\n\n",rep("*",10))}
# fix misc latlon
cv[cv$Country=="Malaysia",c("Lon","Lat")] <- c(101.975769,4.210484) # malaysia
cv[cv$Country==cv[str_which(cv$Country,"Pales"),"Country"],c("Lon","Lat")] <- cv %>% filter(Country=="Israel") %>% select(c("Lon","Lat")) + 0.05 # displace Palestine latlon from israel
cv[cv$Country==cv[str_which(cv$Country,"Gibral"),"Country"],c("Lon","Lat")] <- cv %>% filter(Country=="Spain") %>% select(c("Lon","Lat")) + 0.05 # displace gibraltar latlon from spain
cv[cv$Country==cv[str_which(cv$Country,"Antill"),"Country"],c("Lon","Lat")] <- lonlat %>% filter(Country=="Aruba") %>% select(c("Lon","Lat")) + 0.2 # displace gibraltar latlon from spain
# check NAs
if(any(is.na(cv$Lat))==TRUE){
  cat("\n\n\nLatlon in cv dataset contains NAs\n",rep("*",10),"\n")
  cv[which(is.na(cv$Lat)),"Country"]
}
# find which countries show NAs/anomalies
find_lonlat("Asia")
# get current country name in cv
set_country_name("Asia")
# get numeric
lon <- cv$Lon
lat <- cv$Lat
lonlat_matrix <- matrix(c(lon,lat), ncol = 2) # get matrix for arcs
# if using nafta coords # not run 3-3-20
# get character string for nafta to rm from df and get latlon
# nafta_string <- str_subset(cv$Country,"Amer|Cana|Ecu|Mexi")
# lonlat_matrix <- cv %>% # filter out nafta
#   filter(!Country %in% nafta_string) %>%
#   select(c("Lon","Lat")) %>%
#   unlist %>%
#   matrix(ncol=2)
# nafta_lon <- cv %>% filter(Country %in% nafta_string) %>% select(c("Lon")) %>% unlist
# nafta_lat <- cv %>% filter(Country %in% nafta_string) %>% select(c("Lat")) %>% unlist
# death latlon
death_lon <- cv %>% filter(Deaths>0) %>% select(c("Lon")) %>% unlist
death_lat <- cv %>% filter(Deaths>0) %>% select(c("Lat")) %>% unlist
# get death labels
cv_deaths_labels <- cv %>% filter(Deaths>0) %>% select(Country) %>% unlist
# style -------------------------------------------------------------------
custom_tile <- names(providers)[113] # choose tiles
custom_tile2 <- names(providers)[110]
colv <- "#F90F40" # cases
colv2 <- "#FA0303" # deaths
colv3 <- "#DA740D" # recent cases
opac <- 0.7
# colour vectors: NaN hides zero-count points/lines on the map
colvec_cases <- ifelse(cv_cases > 0, colv,NaN) # get colvec w/o nafta cases
colvec_deaths <- ifelse(cv_deaths > 0 ,colv2,NaN) # remove 0 points
colvec_recent_cases <- ifelse(cv_recent_cases > 0, colv3,NaN) # remove 0 points
# set colvec for if removing nafta polylines # not run 3-3-20
# nafta_cases <- cv %>% filter(Country %in% nafta_string) %>% select("Cases") %>% unlist
# colvec_cases <- ifelse(cv_cases %in% nafta_cases,NaN,colv) # get colvec w/o nafta cases
# colvec_deaths <- ifelse(cv_deaths %in% c(0,nafta_cases),NaN,colv2) # remove 0 points
# add deaths latlon manually # not run 3-2-20
# cv_deaths_lon <- cv %>% filter(Deaths>0) %>% select("Lon") %>% unlist
# cv_deaths_lat <- cv %>% filter(Deaths>0) %>% select("Lat") %>% unlist
# text --------------------------------------------------------------------
# title
# HTML fragments below are rendered by leaflet's addControl/labels/popups
ttl <- paste0("<div style=\"color:#F90F40;\">
              2019-nCov
              </div>","global distribution")
# tr
heading_tr <- paste(
  "<strong> Total cases <div style=\"color:#F90F40; font-size:150%\">",format(cv_total_cases,big.mark=",",scientific = F,trim = T),"</div> </strong>", "<br/>",
  "<strong> Total deaths <div style=\"color:#FA0303; font-size:150%\">",format(cv_total_deaths,big.mark = ",",scientific = F,trim = T),"</div> </strong>","<br/>",
  "<strong> Total cases in last 15 days <div style=\"color:#DA740D; font-size:150%\">",format(cv_total_recent_cases,big.mark = ",",scientific = F,trim = T),"</div> </strong>"
)
# bl
heading_bl <- paste(sep = "<br/>",
                    "Data source: <a href=https://www.ecdc.europa.eu/en/geographical-distribution-2019-ncov-cases>
                    ECDC
                    </a>",
                    "Last data scrape: ", Sys.time(),
                    "",
                    "Github: <a href=https://github.com/darwinanddavis/worldmaps> @darwinanddavis </a>"
)
# labels ## not run
label_cases <- paste(
  "<strong> Continent: </strong>", cv$Continent, "<br/>"
) %>% map(htmltools::HTML)
# popups
popup_cases <- paste(
  "<strong> Country </strong>","<br/>", cv_country,"<br/>","<br/>",
  "<strong> Cases </strong>","<br/>", cv_cases,"<br/>","<br/>",
  "<strong> Global cases ranking </strong>","<br/>", cv_cases_ranked,"/",cv_cases_ranked %>% max,"<br/>","<br/>"
  # "<strong> Total population: </strong>", world_pop$Country,"(1000s)","<br/>",
  # "<strong> Percent of population affected: </strong>", cv_cases[1:length(world_pop$Country)]/world_pop$Country,"%","<br/>",
  # "<strong> Median age: </strong>", world_medage$Country,"<br/>","<br/>"
)
popup_deaths <- paste(
  "<strong> Country </strong>","<br/>", cv_country,"<br/>","<br/>",
  "<strong> Deaths </strong>", "<br/>", cv_deaths,"<br/>","<br/>",
  "<strong> Global death ranking </strong>","<br/>", cv_deaths_ranked,"/",cv_deaths_ranked %>% max
)
popup_recent_cases <- paste(
  "<strong> Country </strong>","<br/>", cv_country,"<br/>","<br/>",
  "<strong> Cases in last 15 days </strong>","<br/>", cv_recent_cases,"<br/>","<br/>",
  "<strong> Global recent cases ranking </strong>","<br/>", cv_cases_15days_ranked, "/",cv_cases_15days_ranked %>% max
)
# controlbox
layer1 <- "Cases"
layer2 <- "Deaths"
layer3 <- "Cases in last 15 days"
# style options -----------------------------------------------------------
# css
# title
# CSS injected into the map via htmltools tags (styles the title overlay)
map_title <- tags$style(
  HTML(".leaflet-control.map-title {
       transform: translate(-50%,-20%);
       position: fixed !important;
       left: 50%;
       text-align: center;
       padding-left: 10px;
       padding-right: 10px;
       background: white; opacity: 0.5;
       font-size: 40px;
       font-family: Optima;
       }"
  ))
title <- tags$div(
  map_title, HTML(ttl)
)
# control box
map_control_box <- tags$style(
  HTML(".leaflet-control-layers-base {
       text-align: left;
       padding-left: 10px;
       padding-right: 10px;
       background: white; opacity: 1;
       font-size: 15px;
       }"
  ))
control_box <- tags$div(
  map_control_box, HTML("")
)
# text labels
style <- list(
  "color" = "black",
  "font-weight" = "normal",
  "font-family" = "Optima",
  "padding" = "3px 3px"
)
style_nafta <- list(
  "color" = "#F90F40",
  "font-weight" = "normal",
  "font-family" = "Optima",
  "padding" = "3px 3px"
)
# text label options
text_label_opt <- labelOptions(noHide = F, direction = "top", textsize = "20px",
                               textOnly = F, opacity = 0.5, offset = c(0,0),
                               style = style, permanent = T
)
text_label_opt_nafta <- labelOptions(noHide = T, direction = "top", textsize = "15px",
                                     textOnly = F, opacity = 0.5, offset = c(0,0),
                                     style = style_nafta, permanent = T
)
# layer options
layer_options <- layersControlOptions(collapsed = F)
# tile options
min_zoom <- 3
max_zoom <- 10
# set max map bounds
latlon_origin <- cv %>% filter(Country=="China") %>% select(c("Lon","Lat")) %>% as.numeric() # china lonlat
max_bound1 <- c(-150,90)
max_bound2 <- c(180,-90)
# layers ------------------------------------------------------------------
# titles
# (re-declared with the same values as in the controlbox section above)
layer1 <- "Cases"
layer2 <- "Deaths"
layer3 <- "Cases in last 15 days"
# point size
# circle radii scale with the square root of the counts
radius_cases <- sqrt(cv_cases) * 2500
radius_deaths <- sqrt(cv_deaths) * 2500
radius_recent_cases <- sqrt(cv_recent_cases) * 2500
# easy buttons
locate_me <- easyButton( # locate user
  icon="fa-crosshairs", title="Zoom to my position",
  onClick=JS("function(btn, map){ map.locate({setView: true}); }"))
reset_zoom <- easyButton( # reset zoom
  icon="fa-globe", title="Reset zoom",
  onClick=JS("function(btn, map){ map.setZoom(3);}"))
# map ---------------------------------------------------------------------
# set arc matrix
# Build the leaflet map: great-circle arcs from latlon_origin (China
# centroid, set above) to every country, plus one circle layer per metric.
cvm <- gcIntermediate(latlon_origin,
                      # lonlat_matrix[1,],
                      lonlat_matrix,
                      n=100,
                      addStartEnd=T,
                      breakAtDateLine = T,
                      sp=T
) %>%
  leaflet() %>%
  setMaxBounds(max_bound1[1],max_bound1[2],max_bound2[1],max_bound2[2]) %>%
  setView(latlon_origin[1],latlon_origin[2],zoom=min_zoom) %>%
  addTiles(custom_tile,
           options = providerTileOptions(minZoom=min_zoom, maxZoom=max_zoom) # set zoom bounds
  ) %>%
  addProviderTiles(custom_tile,
                   group = c(layer1,layer2,layer3),
                   options = providerTileOptions(minZoom=min_zoom, maxZoom=max_zoom) # set zoom bounds
  ) %>%
  addPolylines(color=colvec_cases, # cases
               opacity = opac,
               weight = 0.5,
               group = layer1) %>%
  addPolylines(color=colvec_deaths, # deaths
               opacity = opac,
               weight = 0.5,
               group = layer2) %>%
  addCircles(lon,lat, # cases
             weight=1,
             radius= radius_cases,
             color=colv,
             fillColor=colv,
             label = cv_country,
             popup = popup_cases,
             labelOptions = text_label_opt,
             group = layer1) %>%
  addCircles(lon,lat, # deaths
             weight=1,
             radius=radius_deaths,
             color=colvec_deaths,
             fillColor=colvec_deaths,
             label = cv_country,
             popup = popup_deaths,
             labelOptions = text_label_opt,
             group = layer2) %>%
  addCircles(lon,lat, # recent cases
             weight=1,
             radius=radius_recent_cases,
             color=colvec_recent_cases,
             fillColor=colvec_recent_cases,
             label = cv_country,
             popup = popup_recent_cases,
             labelOptions = text_label_opt,
             group = layer3) %>%
  # addLabelOnlyMarkers(nafta_lon,nafta_lat, # add labels for cases outside of polylines
  #                     label=nafta_string,
  #                     labelOptions = text_label_opt_nafta,
  #                     group=layer1) %>%
  # addLabelOnlyMarkers(death_lon,death_lat, # add labels for deaths outside of polylines
  #                     label=cv_deaths_labels,
  #                     labelOptions = text_label_opt_nafta,
  #                     group=layer2) %>%
  # layer switcher (only Cases visible at start), overlays and buttons
  addLayersControl(
    baseGroups = c(layer1,layer2,layer3),
    options = layer_options) %>%
  hideGroup(c(layer2,layer3)) %>%
  addControl(title, "bottomleft", className = "map-title") %>%
  addControl(heading_bl,"bottomleft") %>%
  addControl(heading_tr, "topright") %>%
  addControl(control_box, "topright", className = "control-layers-base") %>%
  addEasyButton(reset_zoom) %>%
  addEasyButton(locate_me)
cvm
# save outputs ------------------------------------------------------------
# Print accumulated warnings and the last error message before saving.
# FIX: referencing `last.warning` directly errors with "object not found"
# when no warning has been raised in the session; warnings() is safe then.
warnings(); geterrmessage() # get last warning and error message
cvm %>% saveWidget(here("/coronavirus.html"))
cvm %>% saveWidget(here("/worldmaps/coronavirus.html")) # save to dir
# save daily totals
cv_total_df <- data.frame("Date" = Sys.Date(),
                          cv_total)
# append new total to file and save to dir
start_date <- "2020-03-26" # date the totals log was first created
# Note: comparing a character date to Sys.Date() coerces the Date to
# "YYYY-MM-DD", so this appends on every day after the start date.
if(start_date!=Sys.Date()){
  write_csv(cv_total_df,paste0(here(),"/cv_total_df.csv"),append = T,col_names = F)
  cat("New historical data saved to ",here(),"/cv_total_df.csv\n\n");Sys.Date()
}
|
0bbd2baf1ea5fa58cd1a69516cde7d2c7dfe238d | 067dce4f009de9eeae2e07e43296deb70828d884 | /pcgrr/R/kataegis.R | 809d14e7efd7e90e13931e35a626aed97f527ff6 | [
"MIT"
] | permissive | sigven/pcgr | a239ecaae57c10d7431b111fa37bc9c6f66546d8 | 04419dafe1d85df3abe6583eff615e3c57868b1b | refs/heads/master | 2023-08-10T15:05:56.523770 | 2023-05-31T18:44:27 | 2023-05-31T18:44:27 | 85,620,836 | 239 | 55 | MIT | 2023-05-31T18:44:23 | 2017-03-20T20:06:04 | R | UTF-8 | R | false | false | 10,303 | r | kataegis.R | #' Function that detects kataegis events from a data frame
#' with genomic coordinates of mutations
#'
#' @param data data frame with somatic mutations, as produced by kataegis_input
#' @param sample_id sample identifier
#' @param build genomoe assembly build
#' @param min.mut minimum number of mutations in localized hypermutated region
#' @param max.dis maximum distance of kataegis event (basepairs)
#' @param chr column name in data that denotes chromosome
#' @param pos column name in data that denotes position
#' @param txdb transcript database (txdb)
#'
#' @return kataegis_df data frame with potential kataegis events
#'
#' @export
kataegis_detect <- function(data, sample_id = "sample_id",
                            build = "grch37", min.mut = 6,
                            max.dis = 1000,
                            chr = "chr", pos = "pos",
                            txdb = NULL) {
  log4r_info(paste0("Detecting possible kataegis events (clusters of C>T ",
                    "(APOBEC enzyme family) and C>T/G ",
                    "(TLS DNA polymerase) mutations"))
  assertable::assert_colnames(
    data, c(chr, pos), only_colnames = F, quiet = T)
  invisible(
    assertthat::assert_that(
      build == "grch37" | build == "grch38",
      msg = paste0("Value for argument build ('",
                   build,
                   "') not allowed")))
  # Approximate p-arm lengths (centromere positions) for chromosomes 1..24;
  # used only to label which chromosome arm(s) an event falls on.
  chr.arm <- c(1.25e+08, 93300000, 9.1e+07, 50400000, 48400000,
               6.1e+07, 59900000, 45600000, 4.9e+07, 40200000, 53700000,
               35800000, 17900000, 17600000, 1.9e+07, 36600000, 2.4e+07,
               17200000, 26500000, 27500000, 13200000, 14700000, 60600000,
               12500000)
  if (build == "grch38") {
    chr.arm <- c(123400000, 93900000, 90900000, 5e+07, 48800000,
                 59800000, 60100000, 45200000, 4.3e+07, 39800000, 53400000,
                 35500000, 17700000, 17200000, 1.9e+07, 36800000, 25100000,
                 18500000, 26200000, 28100000, 1.2e+07, 1.5e+07, 6.1e+07,
                 10400000)
  }
  # NOTE(review): the scan stops 5 mutations before the end of the table;
  # confirm whether trailing mutations should also be considered.
  num <- dim(data)[1] - 5
  # One candidate row per scan position; rows stay NA unless a run ends there.
  katPoint <- matrix(nrow = num, ncol = 8)
  i <- 1
  mutnum <- 1   # length of the current run of closely spaced mutations
  Cmutnum <- 0  # mutations in the current run with a C/G reference base
  for (i in 1:num) {
    if (data$ref[i] %in% c("C", "G")) {
      Cmutnum <- Cmutnum + 1
    }
    # 'dis' holds the distance to the previous mutation (see kataegis_input);
    # a short gap extends the current run, a long gap closes it.
    if (data$dis[i + 1] <= max.dis) {
      mutnum <- mutnum + 1
    } else {
      if (mutnum >= min.mut) {
        # Run is long enough to report: record its genomic span and arm label.
        len <- data$pos[i] - data$pos[i - mutnum + 1] + 1
        # Map chromosome name to a numeric index (X -> 23, Y -> 24).
        chr.n <- gsub(pattern = "chr", replacement = "", x = data$chr[i],
                      fixed = TRUE)
        chr.n <- gsub(pattern = "X", replacement = "23", x = chr.n,
                      fixed = TRUE)
        chr.n <- gsub(pattern = "Y", replacement = "24", x = chr.n,
                      fixed = TRUE)
        chr.n <- as.numeric(chr.n)
        # Label the arm(s): p if entirely before the centromere, q if entirely
        # after, otherwise the run spans both.
        if (data$pos[i] <= chr.arm[chr.n]) {
          arm <- paste(chr.n, "p", sep = "")
        } else if (data$pos[i - mutnum + 1] >= chr.arm[chr.n]) {
          arm <- paste(chr.n, "q", sep = "")
        } else {
          arm <- paste(chr.n, "p, ", chr.n, "q", sep = "")
        }
        katPoint[i, 1:8] <-
          c(sample_id, data$chr[i], data$pos[i - mutnum + 1],
            data$pos[i], arm, len, mutnum, round(Cmutnum / mutnum, 3))
      }
      mutnum <- 1
      Cmutnum <- 0
    }
  }
  # Keep only rows where an event was actually recorded (all values are
  # character because the matrix mixes strings and numbers).
  kataegis_df <- data.frame(stats::na.omit(katPoint))
  names(kataegis_df) <- c("sample_id", "chrom", "start",
                          "end", "chrom.arm", "length", "number.mut",
                          "weight.C>X")
  if (NROW(kataegis_df) > 0) {
    # Confidence: 0 when the fraction of C/G-reference mutations is < 0.8;
    # otherwise the number of high-weight events on the same chromosome,
    # capped at 3.
    for (i in 1:dim(kataegis_df)[1]) {
      if (as.numeric(as.character(kataegis_df$"weight.C>X"[i])) < 0.8) {
        kataegis_df$confidence[i] <- 0
      } else {
        chrom_i <- kataegis_df$chrom[i]
        kataegis_df$confidence[i] <-
          length(which(
            subset(
              kataegis_df,
              as.numeric(
                as.character(kataegis_df$"weight.C>X")) >= 0.8)$chrom == chrom_i
          ))
        if (kataegis_df$confidence[i] > 3) {
          kataegis_df$confidence[i] <- 3
        }
      }
    }
    kataegis_df <- kataegis_df %>%
      dplyr::arrange(dplyr::desc(.data$confidence))
    # Optional txdb-based annotation of the events (kept disabled upstream).
    # if (!is.null(txdb)) {
    #   gr <-
    #     GenomicRanges::GRanges(
    #       seqnames = S4Vectors::Rle(kataegis_df$chrom),
    #       ranges = IRanges::IRanges(start = as.numeric(as.character(kataegis_df$start)),
    #                                 end = as.numeric(as.character(kataegis_df$end))))
    #   peakAnno <- annotatePeak(gr, tssRegion = c(-3000, 3000),
    #                            TxDb = txdb, annoDb = "org.Hs.eg.db")
    #   kataegis_df$annotation <- peakAnno@anno$annotation
    #   kataegis_df$distanceToTSS <- peakAnno@anno$distanceToTSS
    #   kataegis_df$geneName <- peakAnno@anno$SYMBOL
    #   kataegis_df$geneID <- peakAnno@anno$geneId
    # }
  }
  log4r_info(paste(dim(kataegis_df)[1],
                   "potential kataegis events identified",
                   sep = " "))
  return(kataegis_df)
}
#' Prepare somatic SNV data for kataegis detection
#'
#' Keeps single-nucleotide substitutions, pyrimidine-normalizes ref/alt and
#' the surrounding sequence context, orders variants along the genome, and
#' computes the inter-mutation distance ('dis') consumed by
#' \code{kataegis_detect}.
#'
#' @param variant_set data frame with raw set of somatic mutations
#' @param chr column name in data that denotes chromosome
#' @param pos column name in data that denotes position
#' @param ref column name in data that denotes reference allele
#' @param alt column name in data that denotes alternate allele
#' @param build genome build (grch37 or grch38)
#' @param context_size size of neighbouring sequence context (bp on each side)
#'
#' @return data frame with chr/pos/ref/alt/build/alteration/context/seq/dis
#'   columns, or NULL when fewer than 100 SNVs are available
#'
#' @export
kataegis_input <- function(variant_set, chr = "chr", pos = "pos", ref = "ref",
                           alt = "alt", build = NULL, context_size = 10) {
  invisible(assertthat::assert_that(
    is.data.frame(variant_set),
    msg = paste0("Argument 'variant_set' needs be of type data.frame")))
  invisible(assertthat::assert_that(
    build == "grch37" | build == "grch38",
    msg = paste0("Value for argument build ('", build,
    "') not allowed, allowed reference builds are: 'grch37' or 'grch38'")))
  assertable::assert_colnames(variant_set,
                              c(chr, pos, ref, alt),
                              only_colnames = F, quiet = T)
  mut_data <- variant_set[, c(chr, pos, ref, alt)]
  names(mut_data) <- c("chr", "pos", "ref", "alt")
  # keep bi-allelic SNVs only (single-base A/C/G/T ref and alt)
  mut_data <- mut_data %>%
    dplyr::filter(nchar(ref) == 1 & nchar(alt) == 1 &
                    stringr::str_detect(ref, "^(A|C|T|G)$") &
                    stringr::str_detect(alt, "^(A|C|G|T)$"))
  # kataegis detection is only meaningful with a reasonable number of SNVs
  if (nrow(mut_data) >= 100) {
    bsg <- get_genome_obj(build)
    chr.lens <- as.integer(utils::head(GenomeInfoDb::seqlengths(bsg), 24))
    mut_data$build <- build
    ref_base <- Biostrings::DNAStringSet(mut_data$ref)
    alt_base <- Biostrings::DNAStringSet(mut_data$alt)
    conv.start <- mut_data$pos - context_size
    conv.end <- mut_data$pos + context_size
    context <- Biostrings::getSeq(bsg, mut_data$chr,
                                  start = conv.start, end = conv.end)
    # pyrimidine normalization: reverse-complement purine-reference records so
    # every alteration is expressed from a C/T reference base
    # (the former 'if (TRUE)' wrapper around this step was a no-op; removed)
    idx <- mut_data$ref %in% c("A", "G")
    context[idx] <- Biostrings::reverseComplement(context[idx])
    ref_base[idx] <- Biostrings::reverseComplement(ref_base[idx])
    alt_base[idx] <- Biostrings::reverseComplement(alt_base[idx])
    mut_data$alteration <- paste(ref_base, alt_base, sep = ">")
    mut_data$context <- context
    # Replace chr X and Y with numeric values (23 and 24) for correct ordering
    seq <- gsub(pattern = "chr", replacement = "", x = mut_data$chr,
                fixed = TRUE)
    seq <- gsub(pattern = "X", replacement = "23", x = seq, fixed = TRUE)
    seq <- gsub(pattern = "Y", replacement = "24", x = seq, fixed = TRUE)
    mut_data$seq <- as.numeric(seq)
    mut_data <- mut_data[order(mut_data$seq, mut_data$pos), ]
    # cumulative chromosome offsets turn per-chromosome positions into absolute
    # genome coordinates; diff() then yields distance to the previous mutation
    chr.lens.sum <- cumsum(as.numeric(chr.lens))
    chr.lens.sum <- c(0, chr.lens.sum)
    mut_data$dis <- c(mut_data$pos[1],
                      diff(mut_data$pos + chr.lens.sum[mut_data$seq]))
  } else {
    mut_data <- NULL
  }
  return(mut_data)
}
#' Function that generates data frame with potential kataegis events
#'
#' @param variant_set data frame with SNVs/InDels (must contain 'CHROM',
#' 'POS','REF','ALT')
#' @param sample_name name of tumor sample
#' @param build genome assembly (grch37/grch38)
#'
#' @return kataegis report object with elements 'eval' (logical) and 'events'
#'   (data frame from \code{kataegis_detect})
#'
#' @export
generate_report_data_kataegis <- function(variant_set,
                                          sample_name = "SampleX",
                                          build = "grch37") {
  pcg_report_kataegis <- pcgrr::init_report(class = "kataegis")
  if (NROW(variant_set) == 0) {
    return(pcg_report_kataegis)
  }
  log4r_info("------")
  log4r_info(
    paste0("Kataegis detection from genomic distribution of SNVs"))
  invisible(assertthat::assert_that(
    is.data.frame(variant_set),
    msg = paste0("Argument 'variant_set' needs be of type data.frame")))
  assertable::assert_colnames(
    variant_set, c("CHROM", "REF", "ALT", "POS"), only_colnames = F, quiet = T)
  invisible(assertthat::assert_that(
    build == "grch37" | build == "grch38",
    msg =
      paste0("Value for argument build ('", build,
             "') not allowed, allowed reference builds are: 'grch37' or 'grch38'")))
  # Harmonize chromosome naming: ensure every chromosome carries a 'chr'
  # prefix (vectorized replacement of the former element-wise loop and '== F')
  chr_prefix <- any(startsWith(unique(variant_set[, "CHROM"]), "chr"))
  if (!chr_prefix) {
    variant_set <- variant_set %>%
      dplyr::mutate(CHROM = paste0("chr", .data$CHROM))
  }
  kataegis_data <- pcgrr::kataegis_input(variant_set, chr = "CHROM",
                                         pos = "POS", ref = "REF",
                                         alt = "ALT",
                                         build = build)
  if (!is.null(kataegis_data)) {
    # NOTE(review): kataegis_input() guarantees >= 100 rows when non-NULL, so
    # a data set of exactly 100 SNVs silently skips detection here -- confirm
    # whether '>=' was intended.
    if (nrow(kataegis_data) > 100) {
      pcg_report_kataegis[["eval"]] <- TRUE
      pcg_report_kataegis[["events"]] <-
        pcgrr::kataegis_detect(kataegis_data,
                               sample_id = sample_name,
                               build = build)
    }
  } else {
    log4r_info(
      paste0(
        "No or too few SNVs (< 100) found in input - skipping kataegis detection"))
  }
  return(pcg_report_kataegis)
}
|
b74a9d714d6744ad49b1e9344f32cb65ae49fb79 | 87c3c6dc1946c14b947c0437f42c520b6d038dd1 | /FINAL PROJECT.R | 2802465aac99a96724a184d2a2eea91e65c759a9 | [] | no_license | Luc-Ng/BUFN-758 | fb273e24fa125764c79dbe2d1a01d27cdb569e2d | e06dd50af97f3338729b201c2fc357e0eb22956c | refs/heads/master | 2021-07-08T09:11:52.350629 | 2017-10-05T19:23:18 | 2017-10-05T19:23:18 | 105,928,633 | 0 | 0 | null | 2017-10-05T18:59:34 | 2017-10-05T18:59:34 | null | UTF-8 | R | false | false | 1,788 | r | FINAL PROJECT.R | ###########################################################################
# Define the OLS function
# This function estimates the alpha and beta coefficients in univariate
# linear regressions. It also computes the respective standard errors.
#
# INPUTS: - y: vector of dependent variable (Tx1)
# - x: vector of independent variable (Tx1)
# (the constant is automatically added)
# OUTPUTS: - a_est= estimate of the alpha coefficient
# - b_est= estimate of the beta coefficient
# - se_alpha= standard error of the alpha coefficient
# - se_beta= standard error of the beta coefficient
# Author: Alberto Rossi #
###########################################################################
univariate_ols <- function(y, x) {
  # Univariate OLS: estimates alpha and beta in y = alpha + beta * x + eps,
  # together with the classical (homoskedastic) standard errors of both.
  #
  # Args:
  #   y: numeric vector of the dependent variable (Tx1)
  #   x: numeric vector of the independent variable (Tx1);
  #      the constant is added automatically
  # Returns:
  #   named numeric vector with elements a_est, b_est, se_alpha, se_beta
  stopifnot(is.numeric(y), is.numeric(x), length(y) == length(x))
  n <- length(y)
  # beta = cov(x, y) / var(x), written with sums as in the textbook formula
  numerator <- sum(x * y) - n * mean(y) * mean(x)
  denominator <- sum(x^2) - n * mean(x)^2
  b_est <- numerator / denominator
  a_est <- mean(y) - b_est * mean(x)
  residuals <- y - a_est - b_est * x
  # unbiased estimate of the error variance (n - 2 degrees of freedom)
  s_squared <- (1 / (n - 2)) * sum(residuals^2)
  sum_x_squared <- sum(x^2)
  sum_x_minus_xbar <- sum((x - mean(x))^2)
  # standard errors of the intercept and the slope
  se_alpha <- sqrt(s_squared) * (sqrt((1 / n) * sum_x_squared) / sqrt(sum_x_minus_xbar))
  se_beta <- sqrt(s_squared) * (1 / sqrt(sum_x_minus_xbar))
  # return the estimates visibly (the original ended on an assignment, which
  # returned the result invisibly)
  c(a_est = a_est,
    b_est = b_est,
    se_alpha = se_alpha,
    se_beta = se_beta)
}
#i love marwin
|
374b7cbeb78aeb52d7f55448274579d1f6a6e654 | 0ed56795aaaf488275bb7c63f32300d8467b4f61 | /app.R | 2fa045dbc5afcc0e6a6d35a02eaf49a26b477ec9 | [] | no_license | Anonytic/Hub | e01529c9b3592efebfe55e3c05d8f39bf15a27d0 | 409c9d8326582cfd0cd3a2a4b2262d0db393d4ef | refs/heads/master | 2022-05-26T03:47:16.448563 | 2020-05-01T18:04:49 | 2020-05-01T18:04:49 | 257,459,709 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,206 | r | app.R | library(shiny)
library(tools)
library(purrr)
library(openxlsx)
library(zip)
library(shinyalert)
# Scratch vector for dataset contents.
# NOTE(review): appears unused anywhere in this file -- candidate for removal.
datasetData <- c()
# Raise Shiny's per-request upload limit to 20 MB (default is 5 MB).
options(shiny.maxRequestSize = 20*1024^2)
# Re-render the per-column checkboxes from an uploaded template file.
# A column is pre-checked when its name appears in the first column of the
# template CSV; all other columns are left unchecked.
#
# Fix: the original also built a 'selectedVar' vector that was never used, and
# its else-branch assigned input[[paste(columnName, "Selected")]] -- an input
# id that is never registered, so shiny returns NULL -- into an atomic vector,
# which errors ("replacement has length zero") whenever a data column was
# missing from the template. That dead bookkeeping is removed.
displayTemplate <- function(input, output, session, vars)
{
  template <- input$templateInputFile
  if (!is.null(input$inputFile) && !is.null(template))
  {
    tryCatch({
      templateData <- read.csv(template$datapath)
      checkedVar <- c()
      # one entry per data column: TRUE when listed in the template
      for (columnName in vars())
      {
        checkedVar[columnName] <- columnName %in% templateData[, 1]
      }
      output$inputContents <- renderUI(
        map(vars(), ~ fluidRow(column(width = 8,
          checkboxInput(paste(.x, "Checked"), .x, value = checkedVar[.x])
        )))
      )
    }, error = function(e)
    {
      shinyalert("Error", paste("An error occurred during template load: ", e), type = "error")
    })
  }
}
# Define UI: a single-page layout with one upload control; the remaining
# sections are server-rendered placeholders that appear after a file arrives.
ui <- fluidPage(
  useShinyalert(),          # enables shinyalert() modal dialogs
  titlePanel("Anonytics"),
  fluidRow(
    column(width = 8,
           # primary data upload; .xlsx is accepted in addition to CSV
           fileInput("inputFile", "Upload a CSV/Excel File",
                     accept = c(
                       "text/csv",
                       "text/comma-separated-values,text/plain",
                       ".csv",
                       ".xlsx"
                     )
           )
    )
  ),
  # placeholders filled in by the server once a file is uploaded
  uiOutput("templateContents"),
  uiOutput("inputContents"),
  uiOutput("templateSaveContents"),
  uiOutput("downloadContents")
)
# Server function: loads the uploaded file, renders one checkbox per column,
# and provides download handlers for the anonymized data and a reusable
# template of the checked columns.
server <- function(input, output, session)
{
  # Reactive holding the uploaded data set; re-reads whenever a file is chosen.
  data <- eventReactive(input$inputFile, {
    inputFile <- input$inputFile
    # Once a file arrives, reveal the download / template UI sections.
    output$downloadContents <- renderUI(
      fluidRow(column(width = 8,
                      checkboxInput("includeOriginal", "Include Original File", FALSE),
                      downloadButton("anonymize", "Anonymize Data File")
      ))
    )
    output$templateContents <- renderUI(
      fluidRow(column(width = 8,
                      fileInput("templateInputFile", "Upload a Template File",
                                accept = c(
                                  "text/csv",
                                  "text/comma-separated-values,text/plain",
                                  ".csv"
                                )
                      )
      )),
    )
    output$templateSaveContents <- renderUI(
      fluidRow(
        column(width = 8,
               downloadButton("runTemplateSave", "Save Template")
        )
      )
    )
    # Read by extension; any failure surfaces as a modal alert.
    # NOTE(review): on an invalid extension the reactive's value becomes the
    # return of shinyalert() rather than a data frame -- confirm intent.
    tryCatch({
      if (tolower(file_ext(inputFile$datapath)) == "xlsx")
      {
        data <- read.xlsx(inputFile$datapath)
      }
      else if (tolower(file_ext(inputFile$datapath)) == "csv")
      {
        data <- read.csv(inputFile$datapath, stringsAsFactors=FALSE)
      }
      else
      {
        shinyalert("Error", "Invalid file type.", type = "error")
      }
    }, error = function(e)
    {
      shinyalert("Error", paste("An error occurred during file upload: ", e), type = "error")
    })
  })
  # Column names of the uploaded data; drives checkbox generation.
  vars <- reactive(colnames(data()))
  # Default view: one checkbox per column, all selected.
  output$inputContents <- renderUI(
    map(vars(), ~ fluidRow(column(width = 8,
                                  checkboxInput(paste(.x, "Checked"), .x, TRUE)
    )))
  )
  # Re-tick the checkboxes according to an uploaded template file.
  observeEvent(input$templateInputFile, {
    displayTemplate(input, output, session, vars)
  })
  # Build a zip containing the shuffled (anonymized) data and, optionally,
  # the original file.
  output$anonymize <- downloadHandler(
    filename = function()
    {
      paste("anonymizedFile", "zip", sep=".")
    },
    content = function(file)
    {
      originalData <- data.frame(data())
      newData <- data.frame(data())
      anonymizedTypes <- c()     # NOTE(review): never used below
      anonymizedDatasets <- c()  # NOTE(review): never used below
      for (i in 1:length(vars()))
      {
        columnName <- vars()[i]
        if (input[[paste(columnName, "Checked")]] == TRUE)
        {
          column <- data()[, i]
          numberOfRows <- length(column)
          # Replace each cell with a value drawn from another row of the same
          # column; never row n itself (the drawn index wraps at the end).
          for (n in 1:numberOfRows)
          {
            index <- sample(1:numberOfRows, 1)
            if (index == n)
            {
              index = index + 1
              if (index > numberOfRows)
                index = 1
            }
            newValue <- column[index]
            newData[n, i] <- paste(newValue)
          }
        }
      }
      dataFiles <- c()
      # Write CSVs into the session temp dir, then zip them up.
      setwd(tempdir())
      if (input$includeOriginal == TRUE)
      {
        fileName <- "originalData.csv"
        write.csv(originalData, fileName, row.names=FALSE)
        dataFiles <- append(dataFiles, fileName)
      }
      fileName <- "data.csv"
      write.csv(newData, fileName, row.names=FALSE)
      dataFiles <- append(dataFiles, fileName)
      zipr(file, files=dataFiles)
    },
    contentType = "application/zip"
  )
  # Save the currently checked column names as a one-column template CSV.
  output$runTemplateSave <- downloadHandler(
    filename = function()
    {
      paste("template-", Sys.Date(), ".csv", sep="")
    },
    content = function(file)
    {
      columnNames <- c()
      for (i in 1:length(vars()))
      {
        name <- vars()[i]
        if (input[[paste(name, "Checked")]] == TRUE)
        {
          columnNames <- append(columnNames, name)
        }
      }
      templateFrame <- data.frame(columnNames)
      write.csv(templateFrame, file, row.names=FALSE)
    },
    contentType = ".csv"
  )
}
# Run the application (launches the Shiny app built from 'ui' and 'server')
shinyApp(ui = ui, server = server)
|
e5db83866fcf6644d63d4fc27d7ec2e6a624d42c | 0dc5e7a4c698f67869d27a5139b84eaaffe4e6b7 | /R/time-in-hours.R | 6ba4fee86b3ac5788b3c89c6ec7969e7694124df | [] | no_license | kholsteen/geneRal | 63c73f5079271902035e27110dc2953273ab4c67 | 0b6882a4079f325b0605d01d25d8b890e600a73d | refs/heads/master | 2021-04-28T13:36:57.695078 | 2020-01-15T22:57:11 | 2020-01-15T22:57:11 | 122,109,049 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 297 | r | time-in-hours.R | #' Print elapsed time in hours
#' @export
time_in_hours <- function(toc_object) {
if (length(toc_object$msg) > 0) {
msg <- paste0(toc_object$msg, ":")
} else {
msg <- ""
}
cat(paste(msg,
sprintf("%.2f",(toc_object$toc - toc_object$tic)/3600),
"hours\n"))
}
|
ddce25e77e6a387af9ae61bf92f508104ebdd71c | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/RandomFields/examples/RMdeclare.Rd.R | feb842b959db968218831f8b9f70c8017cb80301 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,045 | r | RMdeclare.Rd.R | library(RandomFields)
### Name: RMdeclare
### Title: Declaration of dummy variables for statistical inference
### Aliases: RMdeclare RM_DECLARE
### Keywords: spatial models
### ** Examples
## Don't show:
StartExample()
## End(Don't show)
RFoptions(seed=0) ## *ANY* simulation will have the random seed 0; set
## RFoptions(seed=NA) to make them all random again
## The following two examples illustrate the use of RMdeclare and the
## argument 'params'. The purpose is not to give nice statistical models
x <- seq(1, 3, 0.1)
## note that there isn't any harm to declare variables ('u')
## RMdeclare that are of no use in a simulation
model <- ~ RMexp(sc=sc1, var=var1) + RMgauss(var=var2, sc=sc2) + RMdeclare(u)
p <- list(sc1=2, var1=3, sc2=4, var2=5)
z <- RFsimulate(model = model, x=x, y=x, params=p)
plot(z)
## note that the model remains the same, only the values in the
## following list change. Here, sc1, var1, sc2 and u are estimated
## and var2 is given by a forula.
p.fit <- list(sc1 = NA, var1=NA, var2=~2 * u, sc2 = NA, u=NA)
lower <- list(sc1=20, u=5)
upper <- list(sc2=1.5, sc1=100, u=15)
f <- RFfit(model, data=z, params=p.fit, lower = lower, upper = upper)
print(f)
## The second example shows that rather complicated constructions are
## possible, i.e., formulae involving several variables, both known ('abc')
## and unknown ones ('sc', 'var'). Note that there are two different
## 'var's a unknown variable and an argument for RMwhittle
## Not run: ##D
##D ##D
##D model2 <- ~ RMexp(sc) + RMwhittle(var = g, nu=Nu) +
##D RMnugget(var=nugg) + RMexp(var=var, Aniso=matrix(A, nc=2)) +
##D RMdeclare(CCC, DD)
##D p.fit <- list(g=~sc^1.5, nugg=~sc * var * abc, sc=NA, var=~DD, Nu=NA, abc=123,
##D A = ~c(1, 2, DD * CCC, CCC), CCC = NA, DD=NA)
##D lower <- list(sc=1, CCC=1, DD=1)
##D upper <- list(sc=100, CCC=100, DD=100)
##D f2 <- RFfit(model2, data=z, params=p.fit, lower = lower, upper = upper)
##D print(f2)
## End(Not run)
## Don't show:
FinalizeExample()
## End(Don't show)
|
b2459f51a3246c7198eaf4196849f444561457f9 | 2d36db40dc11fa09e7fdf48a89b487e0f0175fb6 | /cmscu-model.R | 99a8484bb5feab55f555f0849c52176645b244ce | [] | no_license | behaeghe/capstone | 276c808050212a4307ea6a07fe001d652dd9758c | a885b8db62c338a23d1b9575c56476a09aa7ced1 | refs/heads/master | 2021-08-31T08:12:29.573176 | 2017-12-20T18:36:44 | 2017-12-20T18:36:44 | 103,161,066 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,581 | r | cmscu-model.R | require(cmscu)
##Create dictionaries
# Count-min-sketch frequency dictionaries from the cmscu package: 4 hash
# functions and 2^26 counters each; one sketch per n-gram order.
twitter_1g <- new(FrequencyDictionary,4,2^26)
twitter_2g <- new(FrequencyDictionary,4,2^26)
twitter_3g <- new(FrequencyDictionary,4,2^26)
#twitter_4g <- new(FrequencyDictionary,4,2^26)
# a text cleaning function: normalize a raw tweet/line for n-gram counting --
# lower-case, strip @mentions/HTML/the retweet marker, map terminal
# punctuation to '.', keep only [a-z ' . whitespace], and tighten spacing.
#
# Fixes vs. the original:
#  * gsub("rt","") stripped "rt" inside every word ("start" -> "sta"); the
#    retweet marker is now matched with word boundaries only.
#  * the "@\\w+" mention removal ran after the character filter had already
#    replaced '@' with a space, so it could never match; it now runs first.
clean <- function(line) {
  # lower-case everything
  str <- tolower(line);
  # drop @mentions before the character filter below erases the '@' marker
  str <- gsub("@\\w+", "", str);
  # strip-out small html tags
  str <- gsub('<[^>]{1,2}>', '', str);
  # drop the standalone retweet token "rt" (word boundaries), not the letters
  # "rt" inside words; surplus spaces are collapsed further down
  str <- gsub('\\brt\\b', ' ', str);
  # replace all terminal punctuation with a period
  str <- gsub('[[:space:]]*[.?!:;]+[[:space:]]*', '.', str);
  # get rid of anything not a-z, ', ., or whitespace
  str <- gsub('[^a-z\'.[:space:]]', ' ', str);
  # collapse whitespace
  str <- gsub('[[:space:]]+', ' ', str);
  # make sure contractions are "tight"
  str <- gsub(" ?' ?", "'", str);
  # make sure terminal . are tight
  str <- gsub(' ?\\. ?', '.', str);
  return(str);
}
# Running vocabularies (unique n-grams seen so far), one per n-gram order;
# seeded with a single blank entry that is dropped again after training.
dict_1g <- " "
dict_2g <- " "
dict_3g <- " "
#dict_4g <- " "
# this function lets us create n-grams from a list: slide a window of width n
# over 'lst' and paste each window into a single space-separated string
ngrams <- function(lst, n) {
  last_start <- length(lst) - n + 1
  sapply(1:last_start,
         function(start) paste(lst[start:(start + n - 1)], collapse = " "))
}
# helper function: like ngrams(), but pads the token list with n leading
# '<BOS>' markers and one trailing '<EOS>' before building the n-grams
# (crucial for smoothing n-grams in the test phase)
ngrams_h <- function(lst, n) {
  padded <- c(rep('<BOS>', n), lst, '<EOS>')
  sapply(1:(length(padded) - n + 1),
         function(start) paste(padded[start:(start + n - 1)], collapse = " "))
}
# connect to the file, but don't load the contents!
twitterfile <- file('./final/en_US/data.train.80', 'r', FALSE);
# progress counter printed below; see NOTE inside the loop about how it grows
i <- 500
repeat {
  # select the number of reviews to read at a time. 500 = ~550kb.
  # NOTE(review): n=1 actually reads a single line per iteration, not 500 --
  # either this comment or the n= argument is stale; confirm intent.
  tweets <- readLines(con=twitterfile, n=1);
  # Break loop when you reach the end of the file
  #@if (i<(10**6/500) ){ #only loop through 1000 reviews for testing your loop
  if (length(tweets) == 0) { #comment out if you only want to test loop on first 1000 reviews
    # disconnect the file link
    close(twitterfile);
    # break the loop
    break;
  }
  j<-1  # NOTE(review): 'j' is never used afterwards
  # read a single review
  for (tweet in tweets){
    # parse the current review
    curtweet <- tweet
    # clean the current review
    text <- clean(curtweet)
    # split reviews into sentences
    for (sentence in unlist(strsplit(text, '\\.'))) {
      # split to unigrams
      unilist <- unlist(strsplit(sentence, ' '))
      dict_1g <- unique(unlist(list(dict_1g,unilist)))
      # store unigrams
      twitter_1g$store(unilist)
      # add beginning and end of sentence tags to unigrams, and subsequent n-grams
      # (crucial for smoothing ngrams in test phase)
      #bilist <- c("<BOS>",unilist,'<EOS>')
      # store bigrams, use the "ngrams" function bind unigrams together
      bigrams <- ngrams_h(unilist,2)
      twitter_2g$store(bigrams)
      dict_2g <- unique(unlist(list(dict_2g,bigrams)))
      # store trigrams
      #trilist <- c("<BOS>","<BOS>",unilist,'<EOS>')
      trigrams <- ngrams_h(unilist,3)
      twitter_3g$store(trigrams)
      dict_3g <- unique(unlist(list(dict_3g,trigrams)))
      # NOTE(review): advances the counter by 500 per *sentence*, so the
      # progress message below overstates the number of lines trained.
      i <- i + 500
    }
  }
  cat('\r', paste('Trained', format(i,big.mark = " "), 'lines from twitter.')); #this will track your progress through your dataset!
} #else {break;}
##Finalizing vocabularies
library(tidyverse)
# For each n-gram order: attach the sketch counts to the vocabulary, drop the
# blank seed entry, and sort by descending frequency.
# NOTE(review): as.tibble() is deprecated in newer tidyverse -- as_tibble().
dict_1g <-as.tibble(data.frame(dict_1g,twitter_1g$query(dict_1g))) #make a dataframe
colnames(dict_1g) <- c("unigram","count") #rename columns so we can manipulate it
dict_1g <- dict_1g[-1,] #eliminate the " " introduced when initializing object
dict_1g <- arrange(dict_1g,desc(count)) #sort descending by count
# NOTE(review): the first column is labelled "unigram" for the bigram and
# trigram tables too; harmless (later code keys on "count") but misleading.
dict_2g <-as.tibble(data.frame(dict_2g,twitter_2g$query(dict_2g))) #make a dataframe
colnames(dict_2g) <- c("unigram","count") #rename columns so we can manipulate it
dict_2g <- dict_2g[-1,] #eliminate the " " introduced when initializing object
dict_2g <- arrange(dict_2g,desc(count)) #sort descending by count
dict_3g <-as.tibble(data.frame(dict_3g,twitter_3g$query(dict_3g))) #make a dataframe
colnames(dict_3g) <- c("unigram","count") #rename columns so we can manipulate it
dict_3g <- dict_3g[-1,] #eliminate the " " introduced when initializing object
dict_3g <- arrange(dict_3g,desc(count)) #sort descending by count
##Calculating discounts for each n-gram level
# Column-wise medians of the per-hash count-of-counts histograms (5 bins),
# stacked as rows: trigrams, bigrams, unigrams.
D <- rbind(
  #trigrams
  apply(matrix(unlist(twitter_3g$histogram(4)),ncol=5,byrow=TRUE),2,median),
  #bigrams
  apply(matrix(unlist(twitter_2g$histogram(4)),ncol=5,byrow=TRUE),2,median),
  #uni
  apply(matrix(unlist(twitter_1g$histogram(4)),ncol=5,byrow=TRUE),2,median))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.