blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c077d4e424f957fce04331d0a50208c7b8da7a00 | da4e9a624488743e0d17e33534ac84ef1f2ec02d | /lesson 3.R | 3c00c5395bfe94ead1a95b5cb66f91b8485daf9a | [] | no_license | Seyonne989/test | 091c5b1ba428938453203df3d21220a26f0bbd9b | f131f7e6692151fdd2310f32a5a8077947dc9d5c | refs/heads/master | 2020-03-19T06:30:46.116401 | 2018-06-04T12:42:18 | 2018-06-04T12:42:18 | 136,027,460 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,155 | r | lesson 3.R | x <- c(18, 50, 3, 5, NA, 13, 9, 10, 24, -32, 24, 55, 7, 9, 19,
20, -21, 2, 51, 18, -18, 8, 42, 0, 1, 8, NA, 32, 4, 25,
NA, 29, 18, 6, 13, 12, 26, 4, 0, 12, 42, 11, 8, 20, -1,
61, 10, 2, 17, -40, 19, 7, NA, 41, 3, 10, -5, 0, 12, 1)
# Summary statistics on x (defined above). The first min() deliberately
# returns NA to show that missing values propagate unless na.rm = TRUE.
min (x)
min(x, na.rm = TRUE)       # smallest value, ignoring NAs
mean (x, na.rm = TRUE)     # arithmetic mean, ignoring NAs
sd(x, na.rm = TRUE)        # standard deviation, ignoring NAs
median (x, na.rm = TRUE)   # median, ignoring NAs
sum(is.na(x))              # count of missing values in x
z = matrix(x,nrow = 6)     # reshape x into a 6-row matrix (filled column-wise)
# Build a small demo data frame of people and inspect its structure.
name <- c("Tomer", "Kim", "Michal", "Shira", "Idan", "Dor", "Yael", "Aviv")
age <- c(24, 29, 32, 24, 19, 18, 30, 21)
gender <- c("M", "F", "F", "F", "M", "M", "F", "M")
salary <- c(7100, 6500, 8400, 6200, 7500, 8200, 5000, 8200)
df <- data.frame(name, age, gender, salary)
df                 # print the whole data frame
class(df)          # "data.frame"
dim(df)            # 8 rows x 4 columns
print(names(df))   # column names
# Fixed: `print(x, 6, 2)` and `return(df(6, 2))` were invalid -- `return()`
# is only legal inside a function, and `df(6, 2)` calls the F-distribution
# density function, not this data frame. Single-cell access is `[row, col]`:
df[6, 2]
# Fixed: `$ = c(salary(df))` was a syntax error (`$` is an operator, not an
# assignable name, and `salary` is a vector, not a function). Column access:
df$salary
# Load the car ads data set (absolute Windows path -- only works on the
# author's machine).
Car = read.csv("C:/Users/Yoni Drori/Desktop/studing for R/car_ads.csv")
head(Car, n = 8)                 # peek at the first 8 rows
tail(Car, n = 10)                # and the last 10 rows
Car = Car[,-c(7) ]               # drop column 7
colnames(Car)
colnames(Car)[1] <- "car_type"   # rename the first column
colnames(Car)
head(Car[order(-Car$price),], n = 10)   # 10 most expensive ads
Car = na.omit(Car)               # drop rows containing any missing value
length(unique(Car$car_type))     # number of distinct car types
# Keep petrol sedans priced at most 42,500 from model year 2000 onwards.
Car = Car[Car$price <= 42500 &
Car$body == "sedan" &
Car$year >= 2000 &
Car$engType == "Petrol",]
nrow(Car)                        # rows remaining after the filter
# NOTE(review): this overwrites the ORIGINAL raw CSV with the filtered
# subset -- rerunning the script from the top will read the reduced file.
# Consider writing to a different filename.
write.csv(Car, file = "C:/Users/Yoni Drori/Desktop/studing for R/car_ads.csv", row.names = FALSE)
# Distribution of prices as a histogram.
hist(Car$price,
     main = "Histogram of car prices",
     xlab = "Prices",
     col = "Gold")

# Kernel density of prices, shaded under the curve.
price_density <- density(Car$price)
plot(price_density, main = "Car Prices Density Plot")
polygon(price_density, border = "slategray4", col = "snow2")

# Mileage spread as a boxplot.
boxplot(Car$kms,
        main = "Kilometers Boxplot",
        col = "lemonchiffon4", medcol = "darkblue",
        outcol = "darkblue", pch = 19, cex = 0.8)

# Frequency of ads per model year.
year_counts <- table(Car$year)
barplot(year_counts,
        main = "Car Grouped By Year",
        xlab = "Year", ylab = "Frequency",
        col = "lightgreen", border = "darkgreen")

# The same per-year counts as a pie chart.
pie(year_counts,
    main = "Slices of All Years in The Data",
    col = rainbow(length(year_counts)),
    cex = 0.5)

# Year against price, with a fitted least-squares line overlaid.
plot(year ~ price, data = Car,
     pch = 20, col = "gray54",
     xlab = "Price", ylab = "Year")
title(main = "Price vs Year", font.main = 4)
abline(lm(year ~ price, data = Car), col = "red", lwd = 2.5)
cor(Car$price, Car$year)   # linear correlation between price and year
|
c00e4273e7aa986dfe2b2468317ee3ccc5a7139b | 673cbd1482075b4cfc13279b3c1ead2c161430fe | /shiny/server.R | 7e95ea71713e3e24b37197260bf9db174e005aa0 | [] | no_license | imansingh/NYS_Solar_Dashboard | 18209f7bc791bd883b60a16adb215e7e676cb875 | 133052f24d9ed295e1d09d14725791b67122341f | refs/heads/master | 2021-09-12T14:15:38.512602 | 2018-04-17T13:36:07 | 2018-04-17T13:36:07 | 106,491,774 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 26,446 | r | server.R | shinyServer(function(input, output){
# filter dataset based on input choices ranges selected
get_solar_filtered <- reactive({
solar_filtered = solar
# Days to complete, date applied, date completed
solar_filtered = solar_filtered %>%
filter(Days.To.Complete >= input$days_to_complete[1] |
is.na(Days.To.Complete)) %>%
filter(Days.To.Complete <= input$days_to_complete[2] |
is.na(Days.To.Complete)) %>%
filter(Date.Application.Received >= input$date_applied[1]) %>%
filter(Date.Application.Received <= input$date_applied[2]) %>%
filter(Date.Completed >= input$date_completed[1] |
is.na(Date.Completed)) %>%
filter(Date.Completed <= input$date_completed[2] |
is.na(Date.Completed))
# Project Status
if (length(input$project_status) != 2) {
if (1 %in% input$project_status){
solar_filtered = solar_filtered %>%
filter(Project.Status == 'Complete')
} else if (2 %in% input$project_status){
solar_filtered = solar_filtered %>%
filter(Project.Status == 'Pipeline')
} else solar_filtered = NULL
}
# County
if(!(is.null(input$counties))){
mtx = sapply(input$counties,
function(str)
solar_filtered$County == str)
solar_filtered = solar_filtered[rowSums(mtx) > 0,]
} else solar_filtered = solar_filtered
# City
if(!(is.null(input$cities))){
mtx = sapply(input$cities,
function(str)
solar_filtered$City == str)
solar_filtered = solar_filtered[rowSums(mtx) > 0,]
} else solar_filtered = solar_filtered
# Zip Code
if(!(is.null(input$zip_codes))){
mtx = sapply(input$zip_codes,
function(str)
solar_filtered$Zip.Code == str)
solar_filtered = solar_filtered[rowSums(mtx) > 0,]
} else solar_filtered = solar_filtered
# Sector
if(!(is.null(input$sector))){
mtx = sapply(input$sector,
function(str)
solar_filtered$Sector == str)
solar_filtered = solar_filtered[rowSums(mtx) > 0,]
} else solar_filtered = NULL
# Program Type
if(!(is.null(input$program_type))){
mtx = sapply(input$program_type,
function(str)
solar_filtered$Program.Type == str)
solar_filtered = solar_filtered[rowSums(mtx) > 0,]
} else solar_filtered = NULL
# Solicitation
if(!(is.null(input$solicitation))){
mtx = sapply(input$solicitation,
function(str)
solar_filtered$Solicitation == str)
solar_filtered = solar_filtered[rowSums(mtx) > 0,]
} else solar_filtered = solar_filtered
# Purchase Type
if(!(is.null(input$purchase_type))){
mtx = sapply(input$purchase_type,
function(str) solar_filtered$Purchase.Type == str)
if('None Listed' %in% input$purchase_type){
mtx = cbind(mtx,
is.na(solar$Purchase.Type))
}
solar_filtered = solar_filtered[rowSums(mtx, na.rm = TRUE) > 0, ]
} else solar_filtered = NULL
# Affordable Solar
if(!(is.null(input$affordable_solar))){
mtx = sapply(input$affordable_solar,
function(str) solar_filtered$Affordable.Solar == str)
solar_filtered = solar_filtered[rowSums(mtx) > 0,]
} else solar_filtered = NULL
# Green Jobs Green NY
if(!(is.null(input$green_jobs))){
mtx = sapply(input$green_jobs,
function(str) solar_filtered$Green.Jobs.Green.NY == str)
if('None Listed' %in% input$green_jobs){
mtx = cbind(mtx, is.na(solar_filtered$Green.Jobs.Green.NY))
}
solar_filtered = solar_filtered[rowSums(mtx, na.rm = TRUE) > 0,]
} else solar_filtered = NULL
# Contractor
if((!is.null(input$contractor)) & input$contractor_missing){
mtx = sapply(input$contractor,
function(str)
solar_filtered$Contractor == str)
mtx = cbind(mtx, is.na(solar_filtered$Contractor))
solar_filtered = solar_filtered[rowSums(mtx, na.rm = TRUE) > 0,]
} else if(!is.null(input$contractor)){
mtx = sapply(input$contractor,
function(str)
solar_filtered$Contractor == str)
solar_filtered = solar_filtered[rowSums(mtx, na.rm = TRUE) > 0,]
} else if(!input$contractor_missing){
mtx = matrix(!is.na(solar_filtered$Contractor))
solar_filtered = solar_filtered[rowSums(mtx, na.rm = TRUE) > 0,]
}
else solar_filtered = solar_filtered
# Electric Utility
if(!(is.null(input$utility))){
mtx = sapply(input$utility,
function(str)
solar_filtered$Electric.Utility == str)
solar_filtered = solar_filtered[rowSums(mtx) > 0,]
} else solar_filtered = solar_filtered
# Remote Net Metering
if(!(is.null(input$remote_net_metering))){
mtx = sapply(input$remote_net_metering,
function(str)
solar_filtered$Remote.Net.Metering == str)
if('None Listed' %in% input$remote_net_metering){
mtx = cbind(mtx, is.na(solar_filtered$Remote.Net.Metering))
}
solar_filtered = solar_filtered[rowSums(mtx, na.rm = TRUE) > 0,]
} else solar_filtered = NULL
# Community Distributed Generation
if(!(is.null(input$community_distributed_generation))){
mtx = sapply(input$community_distributed_generation,
function(str)
solar_filtered$Community.Distributed.Generation == str)
solar_filtered = solar_filtered[rowSums(mtx) > 0,]
} else solar_filtered = NULL
# Primary Inverter Manufacturer
if((!is.null(input$inverter_manufacturer)) &
input$inverter_manufacturer_missing){
mtx = sapply(input$inverter_manufacturer,
function(str)
solar_filtered$Primary.Inverter.Manufacturer == str)
mtx = cbind(mtx,
is.na(solar_filtered$Primary.Inverter.Manufacturer))
solar_filtered = solar_filtered[rowSums(mtx, na.rm = TRUE) > 0,]
} else if(!is.null(input$inverter_manufacturer)){
mtx = sapply(input$inverter_manufacturer,
function(str)
solar_filtered$Primary.Inverter.Manufacturer == str)
solar_filtered = solar_filtered[rowSums(mtx, na.rm = TRUE) > 0,]
} else if(!input$inverter_manufacturer_missing){
mtx = matrix(!is.na(solar_filtered$Primary.Inverter.Manufacturer))
solar_filtered = solar_filtered[rowSums(mtx, na.rm = TRUE) > 0,]
}
else solar_filtered = solar_filtered
# Primary Inverter Model
if((!is.null(input$inverter_model)) &
input$inverter_model_missing){
mtx = sapply(input$inverter_model,
function(str)
solar_filtered$Primary.Inverter.Model.Number == str)
mtx = cbind(mtx,
is.na(solar_filtered$Primary.Inverter.Model.Number))
solar_filtered = solar_filtered[rowSums(mtx, na.rm = TRUE) > 0,]
} else if(!is.null(input$inverter_model)){
mtx = sapply(input$inverter_model,
function(str)
solar_filtered$Primary.Inverter.Model.Number == str)
solar_filtered = solar_filtered[rowSums(mtx, na.rm = TRUE) > 0,]
} else if(!input$inverter_model_missing){
mtx = matrix(!is.na(solar_filtered$Primary.Inverter.Model.Number))
solar_filtered = solar_filtered[rowSums(mtx, na.rm = TRUE) > 0,]
}
else solar_filtered = solar_filtered
# Total Inverter Quantity
if(input$inverter_quantity_missing){
solar_filtered = solar_filtered %>%
filter(Total.Inverter.Quantity >= input$inverter_quantity_min |
is.na(Total.Inverter.Quantity)) %>%
filter(Total.Inverter.Quantity <= input$inverter_quantity_max |
is.na(Total.Inverter.Quantity))
} else {
solar_filtered = solar_filtered %>%
filter(Total.Inverter.Quantity >= input$inverter_quantity_min) %>%
filter(Total.Inverter.Quantity <= input$inverter_quantity_max)
}
# Primary PV Module Manufacturer
if((!is.null(input$pv_manufacturer)) &
input$pv_manufacturer_missing){
mtx = sapply(input$pv_manufacturer,
function(str)
solar_filtered$Primary.PV.Module.Manufacturer == str)
mtx = cbind(mtx,
is.na(solar_filtered$Primary.PV.Module.Manufacturer))
solar_filtered = solar_filtered[rowSums(mtx, na.rm = TRUE) > 0,]
} else if(!is.null(input$pv_manufacturer)){
mtx = sapply(input$pv_manufacturer,
function(str)
solar_filtered$Primary.PV.Module.Manufacturer == str)
solar_filtered = solar_filtered[rowSums(mtx, na.rm = TRUE) > 0,]
} else if(!input$pv_manufacturer_missing){
mtx = matrix(!is.na(solar_filtered$Primary.PV.Module.Manufacturer))
solar_filtered = solar_filtered[rowSums(mtx, na.rm = TRUE) > 0,]
}
else solar_filtered = solar_filtered
# Primary PV Module Model
if((!is.null(input$pv_model)) &
input$pv_model_missing){
mtx = sapply(input$pv_model,
function(str)
solar_filtered$PV.Module.Model.Number == str)
mtx = cbind(mtx, is.na(solar_filtered$PV.Module.Model.Number))
solar_filtered = solar_filtered[rowSums(mtx, na.rm = TRUE) > 0,]
} else if(!is.null(input$pv_model)){
mtx = sapply(input$pv_model,
function(str)
solar_filtered$PV.Module.Model.Number == str)
solar_filtered = solar_filtered[rowSums(mtx, na.rm = TRUE) > 0,]
} else if(!input$pv_model_missing){
mtx = matrix(!is.na(solar_filtered$PV.Module.Model.Number))
solar_filtered = solar_filtered[rowSums(mtx, na.rm = TRUE) > 0,]
}
else solar_filtered = solar_filtered
# Total PV Module Quantity
if(input$pv_quantity_missing){
solar_filtered = solar_filtered %>%
filter(Total.PV.Module.Quantity >= input$pv_quantity_min |
is.na(Total.PV.Module.Quantity)) %>%
filter(Total.PV.Module.Quantity <= input$pv_quantity_max |
is.na(Total.PV.Module.Quantity))
} else {
solar_filtered = solar_filtered %>%
filter(Total.PV.Module.Quantity >= input$pv_quantity_min) %>%
filter(Total.PV.Module.Quantity <= input$pv_quantity_max)
}
# PV Module Wattage
solar_filtered = solar_filtered %>%
filter(Total.Nameplate.kW.DC >= input$pv_wattage_min) %>%
filter(Total.Nameplate.kW.DC <= input$pv_wattage_max)
# Expected Annual Production
solar_filtered = solar_filtered %>%
filter(Expected.KWh.Annual.Production >= input$annual_kwh_min) %>%
filter(Expected.KWh.Annual.Production <= input$annual_kwh_max)
# Project Cost
if(input$project_cost_missing){
solar_filtered = solar_filtered %>%
filter(Project.Cost >= input$project_cost_min |
is.na(Project.Cost)) %>%
filter(Project.Cost <= input$project_cost_max |
is.na(Project.Cost))
} else {
solar_filtered = solar_filtered %>%
filter(Project.Cost >= input$project_cost_min) %>%
filter(Project.Cost <= input$project_cost_max)
}
# Cost per kW, Cost per annual kWh
solar_filtered = solar_filtered %>%
filter(Total.Cost.Per.Nameplate.kW >= input$cost_per_kw[1] |
is.na(Total.Cost.Per.Nameplate.kW)) %>%
filter(Total.Cost.Per.Nameplate.kW <= input$cost_per_kw[2] |
is.na(Total.Cost.Per.Nameplate.kW)) %>%
filter(Total.Cost.Per.Annual.KW >= input$cost_per_annual_kwh[1] |
is.na(Total.Cost.Per.Annual.KW)) %>%
filter(Total.Cost.Per.Annual.KW <= input$cost_per_annual_kwh[2] |
is.na(Total.Cost.Per.Annual.KW))
# Incentive
if(input$incentive_missing){
solar_filtered = solar_filtered %>%
filter(Incentive >= input$incentive_min |
is.na(Incentive)) %>%
filter(Incentive <= input$incentive_max |
is.na(Incentive))
} else {
solar_filtered = solar_filtered %>%
filter(Incentive >= input$incentive_min) %>%
filter(Incentive <= input$incentive_max)
}
# Incentive per kW, Incentive per annual kWh
solar_filtered = solar_filtered %>%
filter(Incentive.Per.Nameplate.kW >= input$incentive_per_kw[1] |
is.na(Incentive.Per.Nameplate.kW)) %>%
filter(Incentive.Per.Nameplate.kW <= input$incentive_per_kw[2] + .5 |
is.na(Incentive.Per.Nameplate.kW)) %>%
filter(Incentive.Per.Annual.KW >= input$incentive_per_annual_kwh[1] |
is.na(Incentive.Per.Annual.KW)) %>%
filter(Incentive.Per.Annual.KW <= input$incentive_per_annual_kwh[2] |
is.na(Incentive.Per.Annual.KW))
# Net Cost
solar_filtered = solar_filtered %>%
filter(Net.Cost >= input$net_cost_min |
is.na(Net.Cost)) %>%
filter(Net.Cost <= input$net_cost_max |
is.na(Net.Cost))
# Net Cost per kW, Net Cost per annual kWh
solar_filtered = solar_filtered %>%
filter(Net.Cost.Per.Nameplate.kW >= input$net_cost_per_kw[1] |
is.na(Net.Cost.Per.Nameplate.kW)) %>%
filter(Net.Cost.Per.Nameplate.kW <= input$net_cost_per_kw[2] |
is.na(Net.Cost.Per.Nameplate.kW)) %>%
filter(Net.Cost.Per.Annual.KW >= input$net_cost_per_annual_kwh[1] |
is.na(Net.Cost.Per.Annual.KW)) %>%
filter(Net.Cost.Per.Annual.KW <= input$net_cost_per_annual_kwh[2] |
is.na(Net.Cost.Per.Annual.KW))
})
output$cost_per_kw_slider = renderUI({
sliderInput(
'cost_per_kw',
'Cost Per Installed Wattage ($ / kW)',
min = round(min(solar$Total.Cost.Per.Nameplate.kW[
solar$Project.Cost >= input$project_cost_min &
solar$Project.Cost <= input$project_cost_max],
na.rm = TRUE) - .1, 2),
max = round(max(solar$Total.Cost.Per.Nameplate.kW[
solar$Project.Cost >= input$project_cost_min &
solar$Project.Cost <= input$project_cost_max],
na.rm = TRUE) + .1, 2),
value=c(min(solar$Total.Cost.Per.Nameplate.kW,
na.rm = TRUE),
max(solar$Total.Cost.Per.Nameplate.kW,
na.rm = TRUE)))
})
output$cost_per_annual_kwh_slider = renderUI({
sliderInput(
'cost_per_annual_kwh',
'Cost Per Annual Production ($ / kWh)',
min = round(min(solar$Total.Cost.Per.Annual.KW[
solar$Project.Cost >= input$project_cost_min &
solar$Project.Cost <= input$project_cost_max],
na.rm = TRUE) - .01, 2),
max = round(max(solar$Total.Cost.Per.Annual.KW[
solar$Project.Cost >= input$project_cost_min &
solar$Project.Cost <= input$project_cost_max],
na.rm = TRUE) + 1, 2),
value=c(min(solar$Total.Cost.Per.Annual.KW,
na.rm = TRUE),
max(solar$Total.Cost.Per.Annual.KW,
na.rm = TRUE) + 1))
})
# map with dot size, transparency and 'highlight variable' selected by user
output$map <- renderPlot({
map_base =
ny_counties_map_unfilled +
guides(color = guide_legend(override.aes = list(size = 10)),
shape = guide_legend(override.aes = list(size = 10))) +
# geom_polygon(data = get_solar_filtered(),
# aes_string()) +
scale_fill_gradient(low = 'blue',
# mid = 'yellow',
high = 'red',
trans = "log10",
labels = 'comma')
if(input$map_color_highlight != 'None' &
input$map_shape_highlight != 'None') {
map_base = map_base +
geom_point(data = get_solar_filtered(),
aes_string(x = 'Longitude',
y = 'Latitude',
color = input$map_color_highlight,
shape = input$map_shape_highlight),
size = input$map_dotsize,
alpha = input$map_transparency)
} else if (input$map_color_highlight != 'None'){
map_base = map_base +
geom_point(data = get_solar_filtered(),
aes_string(x = 'Longitude',
y = 'Latitude',
color = input$map_color_highlight),
size = input$map_dotsize,
alpha = input$map_transparency)
} else if (input$map_color_highlight != 'None'){
map_base = map_base +
geom_point(data = get_solar_filtered(),
aes_string(x = 'Longitude',
y = 'Latitude',
color = input$map_shape_highlight),
size = input$map_dotsize,
alpha = input$map_transparency)
} else {
map_base = map_base +
geom_point(data = get_solar_filtered(),
aes_string(x = 'Longitude',
y = 'Latitude'),
size = input$map_dotsize,
alpha = input$map_transparency)
}
map_base
# if(input$map_color_highlight != 'None'){
# map_base + geom_point(data = get_solar_filtered(),
# aes_string(x = 'Longitude',
# y = 'Latitude',
# color = input$map_color_highlight))
# } else map_base
# if(input$map_highlight)
# if(input$map_highlight %in% categorial_small){
# counties_no_fill +
# geom_point(data = get_solar_filtered(),
# aes_string(x = "Longitude",
# y = "Latitude",
# color = input$map_highlight),
# size = input$map_dotsize,
# alpha = input$map_transparency) +
# guides(colour = guide_legend(override.aes = list(size=10))) +
# scale_fill_brewer(palette = 'YlOrRd') +
# coord_quickmap()
# }
})
#show histogram or bar chart using ggplot2, with plot and 'highlight' variables selected by user
output$histogram <- renderPlot({
if(input$histogram_highlight != 'None'){
if (input$histogram_selected %in%
c(categorial_small, categorical_medium)){
ggplot(data = get_solar_filtered(),
aes_string(x = input$histogram_selected)) +
geom_bar(aes_string(# color = 'black',
fill=input$histogram_highlight))
# position = position_dodge()))
} else {
ggplot(data = get_solar_filtered(),
aes_string(x = input$histogram_selected)) +
geom_histogram(aes_string(fill=input$histogram_highlight))
}
} else {
if (input$histogram_selected %in%
c(categorial_small, categorical_medium)){
ggplot(data = get_solar_filtered()) +
# aes_string(x = input$histogram_selected)) +
geom_bar(aes_string(x = input$histogram_selected))
} else {
ggplot(data = get_solar_filtered(),
aes_string(x = input$histogram_selected)) +
geom_histogram()
}
}
})
# show scatterplot using ggplot2, with x, y, and 'highlight' variables selected by user
output$scatterplot <- renderPlot({
if(input$scatter_color_highlight != 'None' &
input$scatter_shape_highlight != 'None'){
if (input$regression == 1){
ggplot(data = get_solar_filtered(),
aes_string(x = input$scatter_x ,
y = input$scatter_y)) +
geom_point(aes_string(color = input$scatter_color_highlight,
shape = input$scatter_shape_highlight),
size = input$scatter_dotsize,
alpha = input$scatter_transparency) +
geom_smooth(color = 'red', na.rm = TRUE) +
guides(color = guide_legend(override.aes = list(size = 10)),
shape = guide_legend(override.aes = list(size = 10)))
} else {
ggplot(data = get_solar_filtered(),
aes_string(x = input$scatter_x ,
y = input$scatter_y)) +
geom_point(aes_string(color = input$scatter_color_highlight,
shape = input$scatter_shape_highlight),
size = input$scatter_dotsize,
alpha = input$scatter_transparency) +
guides(color = guide_legend(override.aes = list(size = 10)),
shape = guide_legend(override.aes = list(size = 10)))
}
} else if(input$scatter_color_highlight != 'None'){
if (input$regression == 1){
ggplot(data = get_solar_filtered(),
aes_string(x = input$scatter_x ,
y = input$scatter_y)) +
geom_point(aes_string(color = input$scatter_color_highlight),
size = input$scatter_dotsize,
alpha = input$scatter_transparency) +
geom_smooth(color = 'red', na.rm = TRUE) +
guides(color = guide_legend(override.aes = list(size = 10)),
shape = guide_legend(override.aes = list(size = 10)))
} else {
ggplot(data = get_solar_filtered(),
aes_string(x = input$scatter_x ,
y = input$scatter_y)) +
geom_point(aes_string(color=input$scatter_color_highlight),
size = input$scatter_dotsize,
alpha = input$scatter_transparency) +
guides(color = guide_legend(override.aes = list(size = 10)),
shape = guide_legend(override.aes = list(size = 10)))
}
} else if(input$scatter_shape_highlight != 'None'){
if (input$regression == 1){
ggplot(data = get_solar_filtered(),
aes_string(x = input$scatter_x ,
y = input$scatter_y)) +
geom_point(aes_string(shape = input$scatter_shape_highlight),
size = input$scatter_dotsize,
alpha = input$scatter_transparency) +
geom_smooth(color = 'red', na.rm = TRUE) +
guides(color = guide_legend(override.aes = list(size = 10)),
shape = guide_legend(override.aes = list(size = 10)))
} else {
ggplot(data = get_solar_filtered(),
aes_string(x = input$scatter_x ,
y = input$scatter_y)) +
geom_point(aes_string(shape = input$scatter_shape_highlight),
size = input$scatter_dotsize,
alpha = input$scatter_transparency) +
guides(color = guide_legend(override.aes = list(size = 10)),
shape = guide_legend(override.aes = list(size = 10)))
}
} else {
if (input$regression == 1){
ggplot(data = get_solar_filtered(),
aes_string(x = input$scatter_x ,
y = input$scatter_y)) +
geom_point(size = input$scatter_dotsize,
alpha = input$scatter_transparency) +
geom_smooth(color = 'red', na.rm = TRUE) +
guides(color = guide_legend(override.aes = list(size = 10)),
shape = guide_legend(override.aes = list(size = 10)))
} else {
ggplot(data = get_solar_filtered(),
aes_string(x = input$scatter_x ,
y = input$scatter_y)) +
geom_point(size = input$scatter_dotsize,
alpha = input$scatter_transparency) +
guides(color = guide_legend(override.aes = list(size = 10)),
shape = guide_legend(override.aes = list(size = 10)))
}
}
})
#show boxplot using ggplot2, with plot and 'highlight' variables selected by user
output$boxplot <- renderPlot({
ggplot(data = get_solar_filtered(),
aes_string(x = input$boxplot_x,
y = input$boxplot_y)) +
geom_boxplot()
})
# show data using DataTable
output$filtered_table <- DT::renderDataTable({
req(input$cost_per_kw)
datatable(get_solar_filtered()[-1],
rownames=FALSE,
options = list(scrollX = TRUE,
scrollY = TRUE,
autoWidth = TRUE)) # %>%
# formatStyle(input$selected, background="skyblue", fontWeight='bold')
})
output$full_table <- DT::renderDataTable({
datatable(solar[-1],
rownames=FALSE,
options = list(scrollX = TRUE,
scrollY = TRUE,
autoWidth = TRUE)) # %>%
# formatStyle(input$selected, background="skyblue", fontWeight='bold')
})
# show statistics using infoBox
#output$maxBox <- renderInfoBox({
# max_value <- max(solar[,input$selected])
#max_state <-
# solar$state.name[solar[,input$selected] == max_value]
#infoBox(max_state, max_value, icon = icon("hand-o-up"))
# })
#output$minBox <- renderInfoBox({
# min_value <- min(state_stat[,input$selected])
# min_state <-
# state_stat$state.name[state_stat[,input$selected] == min_value]
#infoBox(min_state, min_value, icon = icon("hand-o-down"))
#})
#output$avgBox <- renderInfoBox(
# infoBox(paste("AVG.", input$selected),
# mean(state_stat[,input$selected]),
# icon = icon("calculator"), fill = TRUE))
})
|
b7e96264e6cec4f0f49e7b799bca9db4087ec8bf | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/dse/examples/toSSChol.Rd.R | 1abdad0b38ee7b00acda4117c49aa9062545f263 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 302 | r | toSSChol.Rd.R | library(dse)
### Name: toSSChol
### Title: Convert to Non-Innovation State Space Model
### Aliases: toSSChol toSSChol.TSmodel toSSChol.TSestModel
### Keywords: ts
### ** Examples
# Load the example (differenced) data set shipped with the dse package,
# fit a VARX model by least squares, then convert the fitted model to a
# non-innovation (Cholesky) state-space representation.
data("eg1.DSE.data.diff", package="dse")
model <- estVARXls(eg1.DSE.data.diff)
model <- toSSChol(model)
|
278ac57822e7446d2526a551f5ffb1437752a9ce | 3d4ee3bc0d87f2831641d0d2d8420f4492a7851a | /run_analysis.R | b4075707fbd7ab33ab0ece8ed05f160c332bfc22 | [] | no_license | tatishiroma/Getting-and-Cleaning-Data | d074c3fa8dd1183dc7fd647ac8ba24870a7f8d45 | 51d4e2cdb91346162aa3195190d5d9b71964fc3e | refs/heads/master | 2022-12-05T17:56:31.352403 | 2020-08-09T23:44:46 | 2020-08-09T23:44:46 | 285,864,569 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,277 | r | run_analysis.R | # Load necessary libraries
library(dplyr)
# Check for data and download if necessary.
# The existence check is on the extracted "UCI HAR Dataset" folder, so the
# download/unzip only happens on the first run.
if(!file.exists("UCI HAR Dataset")) {
  fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  dir.create("UCI HAR Dataset")
  download.file(fileURL, destfile = "UCI-HAR-dataset.zip", method = "curl")
  unzip("./UCI-HAR-dataset.zip")
}
# 1. Merges the training and the test sets to create one data set.
# Feature matrix, subject ids and activity codes are each stacked
# train-over-test so that rows stay aligned across the three tables.
x.train <- read.table("./UCI HAR Dataset/train/X_train.txt")
x.test <- read.table("./UCI HAR Dataset/test/X_test.txt")
x <- rbind(x.train, x.test)
subject.train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
subject.test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
subject <- rbind(subject.train, subject.test)
y.train <- read.table("./UCI HAR Dataset/train/y_train.txt")
y.test <- read.table("./UCI HAR Dataset/test/y_test.txt")
y <- rbind(y.train, y.test)
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# The regex keeps names containing "mean" not followed by "F" (so meanFreq is
# excluded) or containing "std".
features <- read.table("./UCI HAR Dataset/features.txt")
mean.sd.cols <- grep("(.*)mean[^F]|std(.*)", features[, 2])
x.mean.sd <- x[, mean.sd.cols]
# 3. Uses descriptive activity names to name the activities in the data set.
# Feature names are lower-cased and stripped of parentheses.
names(x.mean.sd) <- features[mean.sd.cols, 2]
names(x.mean.sd) <- tolower(names(x.mean.sd))
names(x.mean.sd) <- gsub("\\(|\\)", "", names(x.mean.sd))
activities <- read.table("./UCI HAR Dataset/activity_labels.txt")
activities[, 2] <- tolower(as.character(activities[, 2]))
activities[, 2] <- gsub("_", "", activities[, 2])
# Replace each numeric activity code by its (cleaned) text label.
y[, 1] = activities[y[, 1], 2]
# 4. Appropriately labels the data set with descriptive variable names.
colnames(y) <- "activity"
colnames(subject) <- "subject"
data <- cbind(subject, x.mean.sd, y)
str(data)
write.table(data, "./Getting-and-Cleaning-Data/merged.txt", row.names = FALSE)
# 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# NOTE(review): aggregate() also tries to average the character "activity"
# column (yielding NA with a warning) before that column is dropped below.
mean.df <- aggregate(x = data, by = list(activities = data$activity, subject = data$subject), FUN = mean)
mean.df <- mean.df[, !(colnames(mean.df) %in% c("subject", "activity"))]
str(mean.df)
write.table(mean.df, "./Getting-and-Cleaning-Data/meandataset.txt", row.names = FALSE)
42c06e319fd639a26dd7b64ddf741857a4ceeef4 | 0128ac2d1aa93193645f8d032ded9ab7565106ad | /run_analysis.R | eb5288f662aa8c391984197a008ec21328621e85 | [] | no_license | EzraZed/getdata-15 | 02eeca477adae1be72ee389e1a965fe0e215e8a3 | fa7a9ee7a17fdc7eb826e7ea3744d5489232e53f | refs/heads/master | 2021-01-23T13:36:39.187890 | 2015-06-21T20:03:10 | 2015-06-21T20:03:10 | 37,822,841 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,017 | r | run_analysis.R | install.packages("dplyr")
library(dplyr)
# Read the raw UCI HAR files; the trailing comments record expected dims.
raw_x_test <- read.table("UCI HAR Dataset/test/X_test.txt") ##2947, 561
raw_y_test <- read.table("UCI HAR Dataset/test/y_test.txt") ##2947, 1
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt") ##2947, 1
raw_x_train <- read.table("UCI HAR Dataset/train/X_train.txt") ##7352, 561
raw_y_train <- read.table("UCI HAR Dataset/train/y_train.txt") ##7352, 1
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt")##7352, 1
raw_features <- read.table("UCI HAR Dataset/features.txt", header = FALSE) ##561, 2
## read in all necessary files
all_x <- rbind(raw_x_train, raw_x_test)
colnames(all_x) <- as.character(raw_features[,2])
## combine all x data, name columns with features
all_subject <- rbind(subject_train, subject_test)
colnames(all_subject) <- "subject"
## combine all subject data, name column
all_y <- rbind(raw_y_train, raw_y_test)
colnames(all_y) <- "activity"
## combine all y/activity data, name column
data <- cbind(all_x, all_subject, all_y)
## combine all data frames
# NOTE(review): `mean` here shadows base::mean as a data object; function
# calls such as FUN = mean below still resolve to the function.
mean <- data[grep("mean\\(.", names(data))]
std <- data[grep("std\\(.", names(data))]
mean_std <- cbind(all_subject, all_y, mean, std)
## extract mean, std info; build data frame with subject, activity and extracted info
# Map each numeric activity code (1-6) onto a descriptive label.
mean_std[,2]<-gsub(1, "walking", mean_std$activity)
mean_std[,2]<-gsub(2, "walking_upstairs", mean_std$activity)
mean_std[,2]<-gsub(3, "walking_downstairs", mean_std$activity)
mean_std[,2]<-gsub(4, "sitting", mean_std$activity)
mean_std[,2]<-gsub(5, "standing", mean_std$activity)
mean_std[,2]<-gsub(6, "laying", mean_std$activity)
## rename activity
# Average columns 3:68 (the 66 measurement columns) per activity/subject,
# then reorder back to subject, activity, measurements.
tidy_data <- aggregate(mean_std[3:68], list(activity = mean_std$activity, subject = mean_std$subject), mean)
tidy_data <- select(tidy_data, subject, activity, 3:68)
## get mean of each column of measurements for each activity, each subject
## reorganize back to subject/activity/data
write.table(tidy_data, "tidy_data.txt", row.name=FALSE)
## output final tidy data
c528f8f321908d25cc6a078cb00ddbfed03e184f | 94a4704c35e7518633bf9789bfef446886dd1fa0 | /Seminar_02-Data-visualisation.R | 878d542b5c028e87eab5fd6222bf2ef2c46d95f4 | [] | no_license | JasperH11/Decision-Analysis_2021 | de31dc3216a62055b40ab57d2e3d1ea683649936 | 37719f879aefea357321b9d35489dcb49575cba1 | refs/heads/main | 2023-06-01T05:52:21.720617 | 2021-06-28T09:38:37 | 2021-06-28T09:38:37 | 366,649,075 | 0 | 2 | null | 2021-05-12T15:41:08 | 2021-05-12T08:41:49 | null | UTF-8 | R | false | false | 4,389 | r | Seminar_02-Data-visualisation.R | # Seminar 2 - Data visualisation
# Participants data ----
# Load the workshop participants survey straight from GitHub (readr::read_csv).
library(readr)
urlfile = "https://raw.githubusercontent.com/CWWhitney/teaching_R/master/participants_data.csv"
participants_data <- read_csv(url(urlfile))
# Bar plot of a single column
plot(participants_data$academic_parents)
# Box plot: email-response days split by the academic-parents category
plot(participants_data$academic_parents, participants_data$days_to_email_response)
# Load ggplot
library(ggplot2)
# Participants data: scatter plot, colour and point size mapped to extra variables
ggplot(data = participants_data,
       aes(x = letters_in_first_name,
           y = days_to_email_response,
           color = academic_parents,
           size = working_hours_per_day)) +
  geom_point()
# Iris data ----
ggplot(data = iris,
       aes(x = Sepal.Length,
           y = Petal.Length,
           color = Species,
           size = Petal.Width)) +
  geom_point()
# Diamonds data ----
# Mapping alpha inside aes() adds it to the legend; compare with I() further below.
ggplot(data = diamonds,
       aes(x = carat,
           y = price,
           alpha = 0.2)) +
  geom_point()
ggplot(data = diamonds,
       aes(x = log(carat), # log() added to linearise the relationship
           y = log(price),
           alpha = 0.2)) +
  geom_point()
library(dplyr)
# Small subset for faster plotting; top_n() keeps the 100 highest rows
# (ranked by the last column when no weighting variable is given).
dsmall <- top_n(diamonds, n = 100)
ggplot(data = dsmall,
       aes(x = carat,
           y = price,
           color = color)) + # Mark colour grade of the diamonds by plot colour
  geom_point()
ggplot(data = dsmall,
       aes(x = carat,
           y = price,
           shape = cut)) + # Mark cut of the diamonds by point shape
  geom_point()
ggplot(data = diamonds,
       aes(x = carat,
           y = price,
           alpha = I(0.1), # I() inhibits interpretation: use 0.1 as a constant, not a mapping
           color = I("blue"))) + # same: constant colour, no legend entry
  geom_point()
# geom options
# Default smoother on top of the points
ggplot(data = dsmall,
       aes(x = carat,
           y = price)) +
  geom_point() +
  geom_smooth()
ggplot(data = dsmall,
       aes(x = carat,
           y = price)) +
  geom_point() +
  geom_smooth(method = 'glm') # Generalised linear model smoother
# Boxplots
# Price per carat by colour grade, raw points overlaid with jitter
ggplot(data = dsmall,
       aes(x = color,
           y = price/carat,
           alpha = I(0.2))) +
  geom_boxplot() +
  geom_jitter() # Adding jitter points
# Histograms
# Overlaid density curves of carat, one fill per colour grade
ggplot(data = diamonds,
       aes(x = carat,
           fill = color, # colour grade of the diamonds
           alpha = I(0.3))) + # set opacity
  geom_density()
# mpg data ----
# subset
# cyl mapped as a continuous variable: single regression line, gradient legend
ggplot(data = mpg,
       aes(x = displ,
           y = hwy,
           color = cyl)) +
  geom_point() +
  geom_smooth(method = "lm")
ggplot(data = mpg,
       aes(x = displ,
           y = hwy,
           color = factor(cyl))) + # cyl now treated as a discrete factor: one line per group
  geom_point() +
  geom_smooth(method = "lm")
# mtcars data ----
# Set title and labels
ggplot(mtcars, aes(mpg, hp,
                   col = gear)) +
  geom_point() +
  ggtitle("My Title") +
  labs(x = "The x label",
       y = "The y label",
       col = "Legend title")
# Using dplyr, ggplot2 and reshape2 ----
library(reshape2)
part_data <- select_if(participants_data, is.numeric) # Keep only numeric columns
cormat <- round(cor(part_data), 1) # Correlation matrix rounded to 1 digit
melted_cormat <- melt(cormat) # Long format: one row per (Var1, Var2, value)
# Visualise as tile plot
ggplot(data = melted_cormat,
       aes(x = Var1,
           y = Var2,
           fill = value)) +
  geom_tile() +
  theme(axis.text.x = element_text(angle = 45, # Tilt axis text by 45 degrees
                                   hjust = 1)) # Right-justify the tilted labels
# Export figures ----
# Open a PNG device; everything drawn until dev.off() goes into the file.
png(file = "cortile.png",
    width = 7,
    height = 6,
    units = "in",
    res = 300)
# NOTE(review): when this script is source()d, ggplot objects are not
# auto-printed -- wrap this call in print() or the PNG may be blank.
ggplot(data = melted_cormat,
       aes(x = Var1,
           y = Var2,
           fill = value)) +
  geom_tile() +
  theme(axis.text.x = element_text(angle = 45,
                                   hjust = 1))
dev.off()
# gganimate and datasauRus ----
library(datasauRus)
library(gganimate)
# Animate through the datasaurus_dozen datasets (3 time units per transition,
# 1 per pause).
ggplot(datasaurus_dozen,
       aes(x = x,
           y = y)) +
  geom_point() +
  theme_minimal() +
  transition_states(dataset, 3, 1) +
  ease_aes('cubic-in-out') # Gives a 100 images instead of animation
# Animated line plot: progressively reveal the price-vs-carat trace along
# carat, coloured by diamond colour grade.
ggplot(data = dsmall,
       aes(x = carat,
           y = price,
           color = color)) + # fixed typo: was `cplor`, an unknown aesthetic, so no colour mapping was applied
  geom_line() +
  transition_reveal(carat) +
  ease_aes("linear") +
  labs(title = 'Diamond carat: {frame_along}')
|
b7173461b10ff85c5d9b9741ae8358a6371894c2 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/AnnotationDbi/examples/GOColsAndKeytypes.Rd.R | 52ac44b526ed036b359807671bf7a32395069982 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 590 | r | GOColsAndKeytypes.Rd.R | library(AnnotationDbi)
### Name: GOID
### Title: Descriptions of available values for 'columns' and 'keytypes'
### for GO.db.
### Aliases: GOID TERM ONTOLOGY DEFINITION
### Keywords: utilities manip
### ** Examples
library(GO.db)
## List the possible values for columns
columns(GO.db)
## List the possible values for keytypes
keytypes(GO.db)
## get some values back
keys <- head(keys(GO.db))
keys
## Look up the term text and ontology for each GO identifier
select(GO.db, keys=keys, columns=c("TERM","ONTOLOGY"),
       keytype="GOID")
## More information about the dates and original sources for these data:
metadata(GO.db)
|
628a6bb0281f44207977b22dd88ea7588eded6af | c62120f9e01c727bf4956fc7f70fcc6ad5f2186a | /R/alignSubspaceSeurat.R | 21f86dee314621ec3a1684ddaf4a4e15c0dcc2a6 | [] | no_license | ycao6928/scPred | 446f529dc2f38f92bd8a5b125982751d3ccd0ade | 31b1d5362cb42702601614ce44a67ef3c0fcb01e | refs/heads/master | 2020-06-01T18:02:43.525899 | 2019-06-08T10:38:14 | 2019-06-08T10:38:14 | 190,875,581 | 0 | 0 | null | 2019-06-08T10:37:53 | 2019-06-08T10:37:52 | null | UTF-8 | R | false | false | 8,767 | r | alignSubspaceSeurat.R | #' @title Align low-dimensional space using Seurat algorithm
#' @description Uses the manifold-alignment Seurat algorithm to align the training eigenspace and the prediction projection
#' See ?AlignSubspace() for more details. Note: this helper function is a modified version from Seurat.
#' @author José Alquicira Hernández
#' @param object Seurat object holding both data sets, distinguished by \code{grouping.var}
#' @param reduction.type Name of the dimension reduction whose subspace is aligned
#' @param grouping.var Metadata variable that splits the cells into the groups to align
#' @param dims.align Indices of the embedding dimensions to align
#' @param num.possible.genes Number of top-loading genes considered per dimension
#' @param num.genes Number of genes retained to build each metagene
#' @param show.plots Draw diagnostic plots for each aligned dimension
#' @param verbose Print progress messages to stderr
#' @param ... Extra arguments forwarded to \code{ScaleData}
#' @return The input object with an additional "<reduction.type>.aligned" dimension reduction
#' @importFrom pbapply pbsapply
.alignSubspaceSeurat <- function (object, reduction.type = "pca.scpred", grouping.var = "dataset_dummy", dims.align,
                                  num.possible.genes = 2000, num.genes = 30, show.plots = FALSE,
                                  verbose = TRUE, ...)
{
  # Record the call parameters on the object (mirrors Seurat's AlignSubspace).
  parameters.to.store <- as.list(environment(), all = TRUE)[names(formals("AlignSubspace"))]
  object <- Seurat:::SetCalcParams(object = object, calculation = paste0("AlignSubspace.",
                                                                         reduction.type), ... = parameters.to.store)
  # Split the object into one sub-object per group (largest group first),
  # then restore the original identities on the full object.
  ident.orig <- object@ident
  object <- SetAllIdent(object = object, id = grouping.var)
  levels.split <- names(x = sort(x = table(object@ident), decreasing = T))
  num.groups <- length(levels.split)
  objects <- list()
  for (i in 1:num.groups) {
    objects[[i]] <- SubsetData(object = object, ident.use = levels.split[i])
  }
  object@ident <- ident.orig
  # Per group: rescale the expression data, project all genes onto the
  # reduction, and keep gene loadings, cell embeddings and the scaled matrix.
  cc.loadings <- list()
  scaled.data <- list()
  cc.embeds <- list()
  for (i in 1:num.groups) {
    if (verbose) {
      cat(paste0("Rescaling group ", i, "\n"), file = stderr())
    }
    objects[[i]] <- ScaleData(object = objects[[i]], display.progress = verbose,
                              ...)
    objects[[i]]@scale.data[is.na(x = objects[[i]]@scale.data)] <- 0
    objects[[i]] <- ProjectDim(object = objects[[i]], reduction.type = reduction.type,
                               do.print = FALSE)
    cc.loadings[[i]] <- GetGeneLoadings(object = objects[[i]],
                                        reduction.type = reduction.type, use.full = TRUE)
    cc.embeds[[i]] <- GetCellEmbeddings(object = objects[[i]],
                                        reduction.type = reduction.type)
    scaled.data[[i]] <- objects[[i]]@scale.data
  }
  # Embeddings of the full object; these get overwritten dimension by
  # dimension with the aligned values below.
  cc.embeds.all <- GetCellEmbeddings(object = object, reduction.type = reduction.type,
                                     dims.use = dims.align)
  colnames(cc.embeds.all) <- paste0("A", colnames(x = cc.embeds.all))
  cc.embeds.orig <- cc.embeds.all
  # Align every other group (g >= 2) onto group 1, one dimension at a time.
  for (cc.use in dims.align) {
    for (g in 2:num.groups) {
      if (verbose) {
        cat(paste0("Aligning dimension ", cc.use, "\n"),
            file = stderr())
      }
      # Candidate genes: highest shared loading rank across the two groups.
      genes.rank <- data.frame(rank(x = abs(x = cc.loadings[[1]][,
                                                                 cc.use])), rank(x = abs(x = cc.loadings[[g]][,
                                                                                                              cc.use])), cc.loadings[[1]][, cc.use], cc.loadings[[g]][,
                                                                                                                                                                      cc.use])
      genes.rank$min <- apply(X = genes.rank[, 1:2], MARGIN = 1,
                              FUN = min)
      genes.rank <- genes.rank[order(genes.rank$min, decreasing = TRUE),
                               ]
      genes.top <- rownames(x = genes.rank)[1:min(num.possible.genes,
                                                  nrow(genes.rank))]
      # Biweight midcorrelation of each candidate gene with the embedding;
      # the pbsapply variant just adds a progress bar when verbose.
      bicors <- list()
      for (i in c(1, g)) {
        cc.vals <- cc.embeds[[i]][, cc.use]
        if (verbose) {
          bicors[[i]] <- pbsapply(X = genes.top, FUN = function(x) {
            return(Seurat:::BiweightMidcor(x = cc.vals, y = scaled.data[[i]][x,
                                                                             ]))
          })
        }
        else {
          bicors[[i]] <- sapply(X = genes.top, FUN = function(x) {
            return(Seurat:::BiweightMidcor(x = cc.vals, y = scaled.data[[i]][x,
                                                                             ]))
          })
        }
      }
      # Keep genes whose correlation sign agrees between groups, ranked by
      # their weaker correlation, and take the top num.genes of them.
      genes.rank <- data.frame(rank(x = abs(x = bicors[[1]])),
                               rank(x = abs(x = bicors[[g]])), bicors[[1]],
                               bicors[[g]])
      genes.rank$min <- apply(X = abs(x = genes.rank[,
                                                     1:2]), MARGIN = 1, FUN = min)
      genes.rank <- genes.rank[sign(genes.rank[, 3]) ==
                                 sign(genes.rank[, 4]), ]
      genes.rank <- genes.rank[order(genes.rank$min, decreasing = TRUE),
                               ]
      genes.use <- rownames(x = genes.rank)[1:min(num.genes,
                                                  nrow(genes.rank))]
      if (length(genes.use) == 0) {
        stop("Can't align group ", g, " for dimension ",
             cc.use)
      }
      # Build one "metagene" per group: loading-weighted sum of the selected
      # genes, evaluated on cells ordered by their embedding value.
      # NOTE(review): multvar.data is assigned but never used (kept as in
      # the upstream Seurat code this was adapted from).
      metagenes <- list()
      multvar.data <- list()
      for (i in c(1, g)) {
        scaled.use <- sweep(x = scaled.data[[i]][genes.use,
                                                 ], MARGIN = 1, STATS = sign(x = genes.rank[genes.use,
                                                                                            which(c(1, g) == i) + 2]), FUN = "*")
        scaled.use <- scaled.use[, names(x = sort(x = cc.embeds[[i]][,
                                                                     cc.use]))]
        metagenes[[i]] <- (cc.loadings[[i]][genes.use,
                                            cc.use] %*% scaled.data[[i]][genes.use, ])[1,
                                                                                       colnames(x = scaled.use)]
      }
      # Shift group g's metagene by the quantile offset (over the 10%-90%
      # range) that best matches group 1, then align with dynamic time warping.
      mean.difference <- mean(x = Seurat:::ReferenceRange(x = metagenes[[g]])) -
        mean(x = Seurat:::ReferenceRange(x = metagenes[[1]]))
      align.1 <- Seurat:::ReferenceRange(x = metagenes[[g]])
      align.2 <- Seurat:::ReferenceRange(x = metagenes[[1]])
      a1q <- sapply(X = seq(from = 0, to = 1, by = 0.001),
                    FUN = function(x) {
                      return(quantile(x = align.1, probs = x))
                    })
      a2q <- sapply(X = seq(from = 0, to = 1, by = 0.001),
                    FUN = function(x) {
                      quantile(x = align.2, probs = x)
                    })
      iqr <- (a1q - a2q)[100:900]
      iqr.x <- which.min(x = abs(x = iqr))
      iqrmin <- iqr[iqr.x]
      if (show.plots) {
        print(iqrmin)
      }
      align.2 <- align.2 + iqrmin
      alignment <- dtw::dtw(x = align.1, y = align.2, keep.internals = TRUE)
      # data.frame() names these columns alignment.index1 / alignment.index2.
      alignment.map <- data.frame(alignment$index1, alignment$index2)
      alignment.map$cc_data1 <- sort(cc.embeds[[g]][, cc.use])[alignment$index1]
      alignment.map$cc_data2 <- sort(cc.embeds[[1]][, cc.use])[alignment$index2]
      alignment.map.orig <- alignment.map
      # Cells mapped to several reference positions get the mean of those
      # positions; then keep one row per cell of group g.
      alignment.map$dups <- duplicated(x = alignment.map$alignment.index1) |
        duplicated(x = alignment.map$alignment.index1,
                   fromLast = TRUE)
      alignment.map <- alignment.map %>% group_by(alignment.index1) %>%
        mutate(cc_data1_mapped = ifelse(dups, mean(cc_data2),
                                        cc_data2))
      alignment.map <- alignment.map[!duplicated(x = alignment.map$alignment.index1),
                                     ]
      # Write the aligned values for group g's cells back into the combined
      # embedding matrix.
      cc.embeds.all[names(x = sort(x = cc.embeds[[g]][,
                                                      cc.use])), cc.use] <- alignment.map$cc_data1_mapped
      if (show.plots) {
        par(mfrow = c(3, 2))
        plot(x = Seurat:::ReferenceRange(x = metagenes[[1]]),
             main = cc.use)
        plot(x = Seurat:::ReferenceRange(x = metagenes[[g]]))
        plot(x = Seurat:::ReferenceRange(x = metagenes[[1]])[(alignment.map.orig$alignment.index2)],
             pch = 16)
        points(x = Seurat:::ReferenceRange(metagenes[[g]])[(alignment.map.orig$alignment.index1)],
               col = "red", pch = 16, cex = 0.4)
        plot(x = density(x = alignment.map$cc_data1_mapped))
        lines(x = density(x = sort(x = cc.embeds[[1]][,
                                                      cc.use])), col = "red")
        plot(x = alignment.map.orig$cc_data1)
        points(x = alignment.map.orig$cc_data2, col = "red")
      }
    }
  }
  # Store the aligned embeddings as a new "<reduction.type>.aligned"
  # dimension reduction with an "A"-prefixed key.
  new.type <- paste0(reduction.type, ".aligned")
  new.key <- paste0("A", GetDimReduction(object = object, reduction.type = reduction.type,
                                         slot = "key"))
  object <- Seurat:::SetDimReduction(object = object, reduction.type = new.type,
                                     slot = "cell.embeddings", new.data = cc.embeds.all)
  object <- Seurat:::SetDimReduction(object = object, reduction.type = new.type,
                                     slot = "key", new.data = new.key)
  return(object)
}
37580d4e80645ad04d2f8126c8fedb9117af6cf3 | 43b93fc1d6a858ee1eb09f037954b5a3c8bf3195 | /NOBUILD/Rsac/R/gcp.R | 43620258b480689ef942f0c909c0ed0f40effb6a | [] | no_license | abarbour/irisws | dfd7e7965d9c92c75fdc03cdaa58b9a23865f718 | 33c019c8550e319273859e1e558cd3230ba697f8 | refs/heads/master | 2021-01-17T07:26:00.671670 | 2016-06-10T22:15:21 | 2016-06-10T22:15:21 | 13,811,412 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,546 | r | gcp.R | "gcp" <- function(s, e)
{
	# Great-circle path between station s and event e, each a list with
	# $lat / $lon in degrees.  Returns (all in degrees):
	#   B  - backazimuth at s,  C - azimuth at e,
	#   a  - angular distance,  lat/lon - 1000 points along the path.
	# Method and notation from Lay and Wallace (1995)
	# Convert to radians:
	s$lat <- s$lat * pi/180
	s$lon <- s$lon * pi/180
	e$lat <- e$lat * pi/180
	e$lon <- e$lon * pi/180
	A <- e$lon - s$lon
	b <- pi/2 - e$lat	# co-lat
	c <- pi/2 - s$lat	# co-lat
	# Spherical law of cosines for the angular distance.
	# NOTE(review): acos() arguments are not clamped to [-1, 1]; rounding
	# can make them slightly exceed 1 for coincident/antipodal points,
	# producing NaN -- confirm inputs avoid those cases.
	a <- acos(cos(b) * cos(c) + sin(b) * sin(c) * cos(A))
	# Azimuth:
	C <- acos((cos(c) - cos(a) * cos(b))/(sin(a) * sin(b)))
	# Backazimuth:
	B <- acos((cos(b) - cos(a) * cos(c))/(sin(a) * sin(c)))
	# Resolve the acos() sign ambiguity from the east/west relation of s and e.
	if(e$lon - s$lon > 0)
	{
		if(abs(e$lon - s$lon) > pi) # s is right of e
			B <- 2*pi - B
		else # s is left of e
			C <- 2*pi - C
	}else
	{
		if(abs(e$lon - s$lon) > pi) # s is left of e
			C <- 2*pi - C
		else # s is right of e
			B <- 2*pi - B
	}
	# Now calculate the lats/lons of the path
	# for display purposes
	Rearth <- 6372795	# Earth radius, for metres
	# Rearth <- 6272.75
	n <- 1000
	dist <- a * Rearth
	# Dd: angular distances of the n sample points from e along the path.
	Dd <- seq(from = 0, to = dist, length = n) / Rearth
	Cc <- rep(C, n)
	lata <- e$lat
	lona <- e$lon
	latb <- asin(cos(Cc) * cos(lata) * sin(Dd) +
	             sin(lata) * cos(Dd))
	# NOTE(review): standard direct-geodesic formulas place the sin() terms
	# in atan2's FIRST argument; verify this ordering (and the
	# `lona - dlon + pi/2` correction below) against the cited reference.
	dlon <- atan2(cos(Dd) - sin(lata) * sin(latb),
	              sin(Cc) * sin(Dd) * cos(lata))
	lonb <- lona - dlon + pi/2
	# Wrap longitudes into (-pi, pi].
	lonb[lonb > pi] <- lonb[lonb > pi] - 2 * pi
	lonb[lonb < -pi] <- lonb[lonb < -pi] + 2 * pi
	# Convert everything back to degrees for the caller.
	C <- C * 180/pi
	B <- B * 180/pi
	latb <- latb * 180/pi
	lonb <- lonb * 180/pi
	a <- a * 180/pi
	return(list(B = B, C = C, a = a, lon = lonb, lat = latb))
}
|
bd61692b6b5872388010ace8762a568dba2d427a | e6ec111d279a2681d12be4ab11230e0c8c5e12ae | /r/powerplot.R | 03dc1c89b1b5e0bd069a0cc7792bfb536ef0a175 | [] | no_license | fchai1/tmp1 | 2a094b163f281ca0dd4a96da2b17bd332915d79d | 8fd9ec027940b5d960665b9ad65f41292d6a2260 | refs/heads/master | 2021-09-08T13:39:14.358937 | 2018-03-10T00:07:54 | 2018-03-10T00:07:54 | 111,469,791 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,599 | r | powerplot.R | sampleRate = 1000000000
# Read a space-delimited log file and return a two-element list holding
# columns 1 and 4, with the first three records of each column discarded.
readdata <- function(logfile) {
  raw <- read.csv(logfile, header = FALSE, sep = " ", as.is = TRUE,
                  stringsAsFactors = FALSE)
  times <- raw[, 1]
  values <- raw[, 4]
  # Drop the first three observations from both columns.
  keep <- length(times) - 3
  list(tail(times, keep), tail(values, keep))
}
# Resample the signal (x, y) onto a uniform grid of spacing `interval`
# starting at x[1], linearly interpolating y between the original samples.
# Original sample points that a grid step lands on/past are also emitted.
# Returns c(newx, newy): the resampled positions followed by the resampled
# values in ONE vector -- callers split it with head()/tail() at the midpoint.
# NOTE(review): the `freq` parameter is never used inside the function.
sampledata <- function(freq, x, y, interval) {
  newx = c()
  newy = c()
  i <- 0
  xstart = x[1]
  currenty = y[1]
  currentidx = 1
  while(i < length(x)) {
    # Position of the next uniform sample.
    samplex <- xstart + interval * (currentidx - 1)
    if(samplex >= x[i + 1])
    {
      # Grid point reached (or passed) the next original sample:
      # copy that sample through and advance to the next original point.
      newx[currentidx] = x[i + 1]
      newy[currentidx] = y[i + 1]
      i <- i + 1
      currentidx <- currentidx + 1
      next
    }
    # Grid point falls between x[i] and x[i+1]: linear interpolation.
    newx[currentidx] <- samplex
    newy[currentidx] <- (y[i + 1] - y[i]) / (x[i + 1] - x[i]) * (samplex - x[i]) + y[i]
    currentidx <- currentidx + 1
  }
  c(newx, newy)
}
# Copy the elements of `intp` into a plain vector, one by one.
# Fixed off-by-one: the loop previously ran while (i < length(intp)) and
# silently dropped the LAST element of the input.
tovector1 <- function(intp) {
  i <- 1
  vec <- c()
  while(i <= length(intp)){
    vec[i] <- intp[i]
    i<-i+1
  }
  vec
}
# Split the (x, y) signal into `numberoftraining` windows of equal width
# maxastraining / numberoftraining, resample each window onto a uniform grid
# of spacing `sampleintval` (via sampledata), and return a list of FFT
# magnitude spectra, all truncated to the shortest spectrum length so the
# feature vectors are comparable.
#   x, y             - sample positions and values of the signal
#   sampleintval     - resampling interval passed to sampledata()
#   maxastraining    - total extent of x covered by the windows
#   numberoftraining - number of windows / feature vectors produced
#   tag              - unused label (kept for interface compatibility)
generateFeatureData <- function(x, y, sampleintval, maxastraining, numberoftraining, tag) {
  interval = maxastraining / numberoftraining
  i <- 0
  minlengh = 100000000000000
  ret = list()
  while(i < numberoftraining) {
    # Window i covers indices i*interval .. (i+1)*interval (R drops the
    # 0 index in the first window).
    subsetx = x[(i * interval) : ((i + 1) * interval)]
    subsety = y[(i * interval) : ((i + 1) * interval)]
    sdata <- sampledata(0, subsetx, subsety, sampleintval)
    # sampledata() returns c(newx, newy); split it back into halves.
    samplex <- head(sdata, length(sdata)/2)
    sampley <- tail(sdata, length(sdata)/2)
    yfft <- fft(sampley)
    yCoef = abs(yfft)
    if(minlengh >= length(yCoef))
      minlengh = length(yCoef)
    ret[i + 1] <-list(list(yCoef))
    i <- i + 1
  }
  # Truncate every spectrum to the common minimum length.
  # Fixed off-by-one: this loop previously ran i = 0 .. length(ret) - 1,
  # so ret[0] was a no-op and the LAST spectrum was never truncated.
  i <- 1
  while( i <= length(ret)) {
    ret[i] <- list(head(unlist(ret[i]), minlengh))
    i <- i + 1
  }
  ret
}
# Load the two power logs (1 GHz and 500 MHz clocks) and unpack the
# time / power columns as numeric vectors.
data1GHz = readdata("/home/cisco/mcpat/mcpat/1GHz.log")
data1MHz = readdata("/home/cisco/mcpat/mcpat/500MHz.log")
x1GHz = as.numeric(unlist(data1GHz[1]))
y1GHz = as.numeric(unlist(data1GHz[2]))
x1MHz = as.numeric(unlist(data1MHz[1]))
y1MHz = as.numeric(unlist(data1MHz[2]))
# Quick look at the start of each trace.
plot(head(x1GHz, 100), head(y1GHz, 100))
plot(head(x1MHz, 200), head(y1MHz, 200))
# FFT magnitude spectra of the raw (non-uniformly sampled) traces.
afft <- fft(y1GHz)
aCoef1G = abs(afft)
plot(x1GHz, aCoef1G)
bfft <- fft(y1MHz)
bCoef <- abs(bfft)
plot(x1MHz, bCoef)
mfft <- fft(y1MHz)
mCoef = abs(mfft)
plot(x1MHz, mCoef)
# Build 40 windowed spectra per trace and plot the first few of each.
featuredata <-generateFeatureData(x1GHz, y1GHz, 500, 160000, 40, "1GHz")
i = 1
while (i < 6) {
  plot(seq(500, 500 * length(unlist(featuredata[i])), 500), unlist(featuredata[i]))
  i <- i + 1
}
featuredata500M <-generateFeatureData(x1MHz, y1MHz, 500, 40000, 40, "1GHz")
i = 1
while (i < 6) {
  plot(seq(500, 500 * length(unlist(featuredata500M[i])), 500), unlist(featuredata500M[i]))
  i <- i + 1
}
# Spectrum of the first 500 samples of each trace after resampling;
# sampledata() returns c(newx, newy), split with head()/tail().
a = head(x1GHz, 500)
b = head(y1GHz, 500)
sample100 <- sampledata(0, a, b, 500)
x=head(sample100, length(sample100)/2)
y=tail(sample100, length(sample100)/2)
yfft <- fft(y)
yCoef = abs(yfft)
plot(x, yCoef)
a = head(x1MHz, 500)
b = head(y1MHz, 500)
sample100 <- sampledata(0, a, b, 500)
x=head(sample100, length(sample100)/2)
y=tail(sample100, length(sample100)/2)
yfft <- fft(y)
yCoef = abs(yfft)
plot(x, yCoef)
#sampled1GHz <- sampledata(0, x1GHz, y1GHz, 500)
#sampled1MHz <- sampledata(0, x1MHz, y1MHz, 500)
# Convert a list of (possibly list-wrapped) feature vectors into a
# samples-by-features matrix: every vector is truncated to the shortest
# vector length, capped at `l`, so all rows have the same width.
#   listx - list of feature vectors
#   l     - upper bound on the number of features (columns) kept
# Returns a length(listx) x minlength matrix, one feature vector per row.
trainingDataToMatrix <- function(listx, l) {
  # Shortest common feature length, capped at l.
  # Fixed: the previous loop ran while (i < length(listx)) and never
  # considered the LAST element, so minlength could exceed that vector's
  # length and corrupt the cbind below.  (It also grew an unused list `a`,
  # now removed.)
  minlength <- l
  for (i in seq_along(listx)) {
    len.i <- length(unlist(listx[i]))
    if (minlength > len.i) {
      minlength <- len.i
    }
  }
  featureMatrix = matrix(head(unlist(listx[1]), minlength), minlength)
  for (i in seq_along(listx)[-1]) {
    featureMatrix <- cbind(featureMatrix, head(unlist(listx[i]), minlength))
  }
  t(featureMatrix)
}
# verification set
# Windows 21-40 of each clock rate are held out for validation; windows
# 1-20 are used for training.  Labels: 20 "1G" rows then 20 "500M" rows.
numberOfFeatures = 100
# NOTE(review): combineData is built but never used below.
combineData=append(featuredata, featuredata500M)
freq = c(rep("1G", 20), rep("500M", 20))
vdata1GHz = featuredata[21:40]
vdata500Mhz = featuredata500M[21:40]
vcombined = append(vdata1GHz, vdata500Mhz)
vData = trainingDataToMatrix(vcombined, numberOfFeatures)
# SVM starts here
# NOTE(review): svm()/predict.svm come from the e1071 package, which is
# never loaded in this script -- add library(e1071) before running.
tdata1GHz = featuredata[1:20]
tdata500MHz = featuredata500M[1:20]
tcombineData = append(tdata1GHz, tdata500MHz)
fData = trainingDataToMatrix(tcombineData, numberOfFeatures)
#View(fData)
training.data.frame <- as.data.frame(fData)
# C-classification SVM predicting the clock rate from the spectra.
model <- svm(freq ~ .,data = fData, type = "C")
fd = cbind(vData, freq)
verification.data.frame <- as.data.frame(fd)
pred <- predict (model, vData)
# Confusion matrix: predicted vs true labels on the hold-out windows.
table(predict=pred, result=freq)
|
377039661d70053b1f2fb7865747b8ab5175a1ee | 757a11cb1fbcde70426e247ceb92d208afa53e95 | /loadData.R | 67dcdfc9cab086a1e905a37cde79411604c71393 | [] | no_license | blaslopez/ExData_Plotting1 | c157587bd13b81e4e2f23d1803f4453a09e8c957 | 67780a4dee9dd3542f1e8a054d009b03935b4fee | refs/heads/master | 2021-01-14T13:44:09.146857 | 2014-12-06T23:47:01 | 2014-12-06T23:47:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 775 | r | loadData.R | #
# 03/12/2014 blg Coursera's
#
# This is the script for the first course project of
# "Exploratory Data Analysis" course
#
# The script downloads the data if necessary,
# loads the partial dataset and plots
#
#
library(sqldf,quietly=FALSE)
# Path to data
# get the base dir and download data if needed (helper defined in getFile.R)
source("getFile.R")
filename <- getFile()
# Load only the two selected days; read.csv.sql filters rows in SQL while
# reading, so the full file never has to fit through R's parser.
cat(paste("Loading file ",filename," ..."))
ds <- read.csv.sql(filename,sep=";", "select * from file where Date in ( '1/2/2007','2/2/2007')")
cat ("done!\n")
# Close connections left open by sqldf
sqldf()
# Column with compound DateTime will make plotting easy:
# paste Date and Time per row, then parse as POSIXlt.
ds$DateTime<-strptime( apply( ds[ , c("Date","Time") ], 1, paste, collapse=" "), "%d/%m/%Y %H:%M:%S")
|
3e80f85cb20dfec4176261cd872a657d2a964c0f | 4e01acf5a07af95846300ed1016edf601fdbb6cc | /Rprogramming/quiz3/rprogrammingquiz3.R | 4a09d76b69a17dde27ea456b40dc6d4514694ee0 | [] | no_license | carolcoder/datasciencecoursera | 5b5c8e9ca270ba961061c4ae4b5dcacfdcf1bab5 | d80a4ac780506179ab1e25cf559256f2f9de4a31 | refs/heads/master | 2021-01-23T02:49:10.301308 | 2015-08-07T20:06:33 | 2015-08-07T20:06:33 | 30,250,558 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 579 | r | rprogrammingquiz3.R | ## Quiz3
library(datasets)
data(iris)
# Open the help pages / inspect the data structure
?iris
dimnames(iris)
#Question 1
# Mean sepal length restricted to the virginica species
filter <- iris[iris$Species == 'virginica',]
mean(filter$Sepal.Length)
#Question 2
# Candidate answers: apply over rows (MARGIN = 1) vs columns (MARGIN = 2).
# Note that applying over the full iris coerces to character because of the
# Species column, so those variants yield NAs with warnings.
iris[,1:4]
apply(iris[,1:4], 1, mean)
apply(iris[,1:4], 2, mean)
apply(iris,2,mean)
apply(iris,1,mean)
library(datasets)
data(mtcars)
?mtcars
dimnames(mtcars)
#Question 3
# Candidate expressions for "mean mpg by number of cylinders"
lapply(mtcars, mean)
with(mtcars,tapply(mpg,cyl,mean))
mean(mtcars$mpg,mtcars$cyl)
tapply(mtcars$cyl,mtcars$mpg,mean)
tapply(mtcars$mpg,mtcars$cyl,mean)
#Question 4
# Difference between mean horsepower of 8- and 4-cylinder cars
tapply(mtcars$hp,mtcars$cyl,mean)
209.21429 - 82.63636
#Question 5
# Flag ls() for debugging, then trigger the browser by calling it
debug(ls)
ls()
|
928da2f818c11a91d00f6cbfa72ab1c88bbe2498 | b5c723088885098e0847db9d55d3df68c354e8cf | /man/Pikl.rd | 8d94cc2b52e08bad9e7b75fc3ff92ef1af0e81bf | [] | no_license | psirusteam/TeachingSampling | 5fd95cd428bb55319a3a0273c52aa38f14d19a9b | 2659f3b16907055a14aabd722be5bfdd5bd9fad6 | refs/heads/master | 2021-06-04T04:34:18.096049 | 2020-04-21T19:55:59 | 2020-04-21T19:55:59 | 101,214,525 | 3 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,808 | rd | Pikl.rd | \name{Pikl}
\alias{Pikl}
\title{Second Order Inclusion Probabilities for Fixed Size Without Replacement Sampling Designs}
\description{Computes the second-order inclusion probabilities of each pair of units in the population given a
fixed sample size design}
\usage{
Pikl(N, n, p)
}
\arguments{
\item{N}{Population size}
\item{n}{Sample size}
\item{p}{A vector containing the selection probabilities of a fixed size without replacement sampling design. The sum of the values of this vector must be one}
}
\seealso{
\code{\link{VarHT}, \link{Deltakl}, \link{Pik}}
}
\details{The second-order inclusion probability of the \eqn{kl}th units is defined as the probability that unit \eqn{k} and unit
\eqn{l} will be both included in a sample; it is denoted by \eqn{\pi_{kl}} and obtained from a given sampling design as follows:
\deqn{\pi_{kl}=\sum_{s\ni k,l}p(s)}
}
\value{The function returns a symmetric matrix of size \eqn{N \times N} containing the second-order inclusion probabilities
for each pair of units in the finite population.}
\author{Hugo Andres Gutierrez Rojas \email{hagutierrezro@gmail.com}}
\references{
Sarndal, C-E. and Swensson, B. and Wretman, J. (1992), \emph{Model Assisted Survey Sampling}. Springer.\cr
Gutierrez, H. A. (2009), \emph{Estrategias de muestreo: Diseno de encuestas y estimacion de parametros}.
Editorial Universidad Santo Tomas.
}
\examples{
# Vector U contains the label of a population of size N=5
U <- c("Yves", "Ken", "Erik", "Sharon", "Leslie")
N <- length(U)
# The sample size is n=2
n <- 2
# p is the probability of selection of every sample.
p <- c(0.13, 0.2, 0.15, 0.1, 0.15, 0.04, 0.02, 0.06, 0.07, 0.08)
# Note that the sum of the elements of this vector is one
sum(p)
# Computation of the second-order inclusion probabilities
Pikl(N, n, p)
}
\keyword{survey}
|
36e3af2dae1e78a71d56f50097d86af8c72f3d50 | 253736d8cd710ddcc825866343ff8bace67c66c7 | /R/TileDBArray.R | 57f8780885d458bd12d9a00138bcdc757c613235 | [
"MIT"
] | permissive | aaronwolen/testtest | 52c12db6ccc66cd7edcd1c286526e3a17c9182eb | ef190bfca5d475d14e824a1c82e9810ce2278050 | refs/heads/master | 2022-10-19T23:20:13.450125 | 2020-06-11T16:26:11 | 2020-06-11T16:26:11 | 271,594,270 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,245 | r | TileDBArray.R | #' Delayed TileDB arrays
#'
#' The TileDBArray class provides a \linkS4class{DelayedArray} backend for TileDB arrays (sparse and dense).
#'
#' @section Constructing a TileDBArray:
#' \code{TileDBArray(x, attr)} returns a TileDBArray object given:
#' \itemize{
#' \item \code{x}, a URI path to a TileDB backend, most typically a directory.
#' \item \code{attr}, a string specifying the attribute to represent in the array.
#' Defaults to the first attribute.
#' }
#' Alternatively, \code{x} can be a TileDBArraySeed object, in which case \code{attr} is ignored.
#'
#' \code{TileDBArraySeed(x, attr)} returns a TileDBArraySeed
#' with the same arguments as described for \code{TileDBArray}.
#' If \code{x} is already a TileDBArraySeed, it is returned
#' directly without further modification.
#'
#' \code{\link{DelayedArray}(x)} returns a TileDBArray object
#' given \code{x}, a TileDBArraySeed.
#'
#' In all cases, two-dimensional arrays will automatically generate a TileDBMatrix,
#' a subclass of the TileDBArray.
#'
#' @section Available operations:
#' \code{\link{extract_array}(x, index)} will return an ordinary
#' array corresponding to the TileDBArraySeed \code{x} subsetted
#' to the indices in \code{index}.
#' The latter should be a list of length equal to the number of
#' dimensions in \code{x}.
#'
#' \code{\link{type}(x)} will return a string containing the type of the TileDBArraySeed object \code{x}.
#' Currently, only \code{"integer"}, \code{"logical"} and \code{"double"}-precision is supported.
#'
#' \code{\link{is_sparse}(x)} will return a logical scalar indicating
#' whether the TileDBArraySeed \code{x} uses a sparse format in the TileDB backend.
#'
#' All of the operations described above are also equally applicable to TileDBArray objects,
#' as their methods simply delegate to those of the TileDBArraySeed.
#'
#' All operations supported by \linkS4class{DelayedArray} objects are
#' also available for TileDBArray objects.
#'
#' @aliases
#' TileDBArraySeed
#' TileDBArraySeed-class
#' TileDBArray
#' TileDBArray-class
#' TileDBMatrix
#' TileDBMatrix-class
#' show,TileDBArraySeed-method
#' is_sparse,TileDBArraySeed-method
#' type,TileDBArraySeed-method
#' extract_array,TileDBArraySeed-method
#' DelayedArray,TileDBArraySeed-method
#'
#' @author Aaron Lun
#'
#' @examples
#' data <- matrix(rpois(10000, 5), nrow=100, ncol=100)
#' B <- as(data, "TileDBArray")
#' B
#'
#' # Apply typical DelayedArray operations:
#' as.matrix(B[1:10,1:10])
#' if (require("BiocParallel")) {
#' B %*% runif(ncol(B))
#' }
#'
#' @name TileDBArray
NULL
#' @export
TileDBArraySeed <- function(x, attr) {
    # Already a seed: return it unchanged (attr is ignored in this case).
    if (is(x, "TileDBArraySeed")) {
        return(x)
    }
    # Open the TileDB array at the URI `x` just long enough to read its
    # schema; closed again on exit.
    obj <- tiledb_array(x)
    on.exit(tiledb_array_close(obj))
    s <- schema(obj)
    d <- dim(domain(s))
    a <- attrs(s)
    # Default to the first attribute when none was requested.
    if (missing(attr)) {
        attr <- names(a)[1]
    } else if (!attr %in% names(a)) {
        stop("'attr' not in the TileDB attributes")
    }
    # Map the TileDB datatype onto an R storage type; datatypes outside
    # .rev.type.mapping are rejected.
    type <- datatype(a[[attr]])
    my.type <- .rev.type.mapping[type]
    if (is.na(my.type)) {
        stop("'attr' refers to an unsupported type")
    }
    # Array-level metadata can refine the type: logical data is stored as
    # integer in TileDB and restored here via the recorded "type" entry.
    meta <- .get_metadata(x, attr, sparse=is.sparse(s))
    if (my.type=="integer" && identical(meta$type, "logical")) {
        my.type <- meta$type
    }
    # Use dimnames recorded in the metadata, if any; otherwise an all-NULL
    # list with one entry per dimension.
    dimnames <- vector("list", length(d))
    if (!is.null(meta$dimnames)) {
        dimnames <- meta$dimnames
    }
    new("TileDBArraySeed", dim=d, dimnames=dimnames, path=x,
        sparse=is.sparse(s), attr=attr, type=my.type)
}
.get_metadata <- function(path, attr, sparse) {
if (sparse) {
obj <- tiledb_sparse(path, attrs=attr)
} else {
obj <- tiledb_dense(path, attrs=attr)
}
obj <- tiledb_array_open(obj, "READ") # not sure why it doesn't work with query_type="READ".
on.exit(tiledb_array_close(obj), add=TRUE)
type <- tiledb_get_metadata(obj, "type")
dimnames <- tiledb_get_metadata(obj, "dimnames")
if (!is.null(dimnames)) {
dimnames <- .unpack64(dimnames)
}
list(type=type, dimnames=dimnames)
}
#' @importFrom S4Vectors setValidity2
setValidity2("TileDBArraySeed", function(object) {
msg <- .common_checks(object)
d <- dim(object)
dn <- dimnames(object)
if (length(dn)!=length(d)) {
msg <- c(msg, "'dimnames' must the same length as 'dim'")
}
if (!all(d==lengths(dn) | vapply(dn, is.null, FALSE))) {
msg <- c(msg, "each 'dimnames' must be NULL or the same length as the corresponding dimension")
}
if (length(msg)) {
msg
} else {
TRUE
}
})
#' @export
#' @importFrom methods show
setMethod("show", "TileDBArraySeed", function(object) {
cat(sprintf("%i x %i TileDBArraySeed object\n", nrow(object), ncol(object)))
})
#' @export
setMethod("is_sparse", "TileDBArraySeed", function(x) x@sparse)
#' @export
setMethod("type", "TileDBArraySeed", function(x) x@type)
#' @export
setMethod("extract_array", "TileDBArraySeed", function(x, index) {
d <- dim(x)
for (i in seq_along(index)) {
if (is.null(index[[i]])) index[[i]] <- seq_len(d[i])
}
# Set fill to zero so that it behaves properly with sparse extraction.
fill <- switch(type(x), double=0, integer=0L, logical=FALSE)
# Hack to overcome zero-length indices.
d2 <- lengths(index)
if (any(d2==0L)) {
return(array(rep(fill, 0L), dim=d2))
}
# Figuring out what type of array it is.
if (is_sparse(x)) {
obj <- tiledb_sparse(x@path, attrs=x@attr, query_type="READ", as.data.frame=TRUE)
} else {
obj <- tiledb_dense(x@path, attrs=x@attr, query_type="READ")
}
on.exit(tiledb_array_close(obj))
if (is_sparse(x)) {
.extract_noncontiguous_sparse(obj, index, fill)
} else {
.extract_noncontiguous_dense(obj, index, fill)
}
})
#' @export
TileDBArray <- function(x, ...) {
DelayedArray(TileDBArraySeed(x, ...))
}
#' @export
setMethod("DelayedArray", "TileDBArraySeed",
function(seed) new_DelayedArray(seed, Class="TileDBMatrix")
)
#######################################################
# Hacks to get around tiledb's interface limitations. #
#######################################################
.get_contiguous <- function(obj, index) {
ndim <- length(index)
new.starts <- new.ends <- cum.width <- usdex <- vector("list", ndim)
# Identifying all contiguous unique stretches.
for (i in seq_len(ndim)) {
cur.index <- index[[i]]
re.o <- unique(sort(cur.index))
usdex[[i]] <- re.o
diff.from.last <- which(diff(re.o)!=1L)
all.starts <- c(1L, diff.from.last+1L)
all.ends <- c(diff.from.last, length(re.o))
new.starts[[i]] <- all.starts
new.ends[[i]] <- all.ends
}
# Looping across them to extract every possible combination.
collected <- list()
current <- rep(1L, ndim)
totals <- lengths(new.starts)
repeat {
absolute <- relative <- vector("list", ndim)
for (i in seq_len(ndim)) {
j <- current[i]
relative[[i]] <- new.starts[[i]][j]:new.ends[[i]][j]
absolute[[i]] <- usdex[[i]][relative[[i]]]
}
# Because tiledb_sparse just errors if there are no entries.
block <- try(do.call("[", c(list(x=obj), absolute, list(drop=FALSE))), silent=TRUE)
if (!is(block, "try-error")) {
collected[[length(collected)+1L]] <- list(relative=relative, block=block)
}
finished <- TRUE
for (i in seq_len(ndim)) {
current[i] <- current[i] + 1L
if (current[i] <= totals[i]) {
finished <- FALSE
break
} else {
current[i] <- 1L
}
}
if (finished) break
}
list(collected=collected, usdex=usdex)
}
.extract_noncontiguous_dense <- function(obj, index, fill) {
contig <- .get_contiguous(obj, index)
collected <- contig$collected
usdex <- contig$usdex
output <- array(fill, dim=lengths(usdex))
for (i in seq_along(collected)) {
current <- collected[[i]]
if (is.logical(fill)) {
storage.mode(current$block) <- "logical"
}
output <- do.call("[<-", c(list(x=output), current$relative, list(value=current$block)))
}
m <- mapply(match, x=index, table=usdex, SIMPLIFY=FALSE)
do.call("[", c(list(x=output), m, list(drop=FALSE)))
}
.extract_noncontiguous_sparse <- function(obj, index, fill) {
contig <- .get_contiguous(obj, index)
collected <- contig$collected
usdex <- contig$usdex
output <- array(fill, dim=lengths(usdex))
for (i in seq_along(collected)) {
current <- collected[[i]]
df <- current$block
if (nrow(df)==0) next
value <- df[,1]
if (is.logical(fill)) {
storage.mode(value) <- "logical"
}
# Don't rely on "SIMPLIFY" as it doesn't do the right thing for length 1.
m <- mapply(match, x=as.list(df[,1L + seq_len(ncol(df)-1L)]), table=usdex, SIMPLIFY=FALSE)
output[do.call(cbind, m)] <- value
}
m <- mapply(match, x=index, table=usdex, SIMPLIFY=FALSE)
do.call("[", c(list(x=output), m, list(drop=FALSE)))
}
|
daee3f7e9be6a8c9e799bc8510bf87b89c2b05bb | 8f63990929064c6b167ab2b6859bef177c2e0067 | /man/loglik.Rd | 6087b8f3c0adccbf092b708413c7bd50511a00f9 | [] | no_license | nanyyyyyy/spatPomp | f3378f876063249203c63a851f3d43fa916b12fe | b4b050f6421539b21324f809294d134f3feb1833 | refs/heads/master | 2022-04-15T10:09:24.483992 | 2020-04-01T15:50:59 | 2020-04-01T15:50:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 664 | rd | loglik.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spatPomp_methods.R
\docType{methods}
\name{logLik-girfd_spatPomp}
\alias{logLik-girfd_spatPomp}
\alias{logLik,girfd_spatPomp-method}
\alias{logLik-asifd.spatPomp}
\alias{logLik,asifd.spatPomp-method}
\alias{logLik-asifird_spatPomp}
\alias{logLik,asifird_spatPomp-method}
\alias{logLik-igirfd_spatPomp}
\alias{logLik,igirfd_spatPomp-method}
\title{loglik}
\usage{
\S4method{logLik}{girfd_spatPomp}(object)
\S4method{logLik}{asifd_spatPomp}(object)
\S4method{logLik}{asifird_spatPomp}(object)
\S4method{logLik}{igirfd_spatPomp}(object)
}
\description{
loglik
loglik
loglik
loglik
}
|
4724c24caab68f2afe3dfd9dbb3984a9f03d0d59 | 65317ea9159976b3fda084b2321c9afe959f6794 | /R/formatPval.R | 925539f503c18e67b9e8aabb4ffbdb30a9b48098 | [] | no_license | cran/reporttools | c5a306a433bad475952b4515c30c557438050c5c | fc18cc11152b5ae783ff34376120bc08196a12a9 | refs/heads/master | 2021-10-28T08:56:09.300219 | 2021-10-12T15:10:02 | 2021-10-12T15:10:02 | 17,699,149 | 1 | 0 | null | 2014-09-04T01:06:29 | 2014-03-13T06:05:19 | TeX | UTF-8 | R | false | false | 2,139 | r | formatPval.R | formatPval <- function (pv,
digits = max(1, getOption("digits") - 2),
eps = 0.0001,
na.form = "NA",
scientific=FALSE,
includeEquality=FALSE)
{
## first discard NA values, which will be included as the string in "na.form"
## at the end of the function
if ((has.na <- any(ina <- is.na(pv))))
{
pv <- pv[!ina]
}
r <- character(length(is0 <- pv < eps))
## process the large p values
if (any(! is0))
{
rr <- pv <- pv[! is0]
expo <- floor(log10(ifelse(pv > 0, pv, 1e-50)))
fixp <- expo >= -3 | (expo == -4 & digits > 1)
if (any(fixp))
{
## DSB's initial version:
rr[fixp] <- format(pv[fixp], digits=digits, scientific=scientific)
## my version:
rr[fixp] <- disp(pv[fixp], 2, 2)
}
if (any(!fixp))
{
## DSB's initial version:
rr[! fixp] <- format(pv[! fixp], digits=digits, scientific=scientific)
## my version:
rr[! fixp] <- disp(pv[! fixp], 2, 2)
}
r[! is0] <- rr
}
## process the small p values
if (any(is0))
{
digits <- max(1, digits - 2)
if (any(!is0))
{
nc <- max(nchar(rr, type = "w"))
if (digits > 1 && digits + 6 > nc)
{
digits <- max(1, nc - 7)
}
}
r[is0] <- format(eps, digits = digits, scientific=scientific)
}
## add (in)equality signs
frontEqual <-
if(includeEquality)
"= "
else
""
r <- paste(ifelse(is0, "< ", frontEqual),
r,
sep="")
## finally add back the NAs
if (has.na)
{
rok <- r
r <- character(length(ina))
r[! ina] <- rok
r[ina] <- na.form
}
return(r)
}
|
9ed46072e66503031fe84bdd3d6831366fe65c89 | b6cbb7c02b05e2e222e77c8e0816ba45ceabdf77 | /Mini2Test2.R | 121eacf8fd148f6f1eecf9b0323db0dcba8dff64 | [] | no_license | sebastianpantin/Classification | 8dab55c03426ca7a2c53f64a381d48f9b40e7708 | 6287f32993b9b0e2385e8555fe292f84bbb3beba | refs/heads/master | 2020-03-11T10:17:16.911514 | 2018-04-17T19:35:39 | 2018-04-17T19:35:39 | 129,938,361 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,858 | r | Mini2Test2.R | library(GGally)
library(klaR)
library(caret)
creditcard<-read.csv("creditcard.csv")
iu<-sample(seq(1,284808),20000)
subedData <- creditcard[iu,] #25 26 27 28 29 30 31
dataNumeric <- apply(subedData,2, function(x) as.numeric(paste(x)))
dataFrame <- as.data.frame(dataNumeric)
dataFrame$Class <- as.factor(dataFrame$Class)
positiveData <- creditcard[creditcard[,31]==1,]
positiveData <- na.omit(positiveData)
negativeData <- creditcard[creditcard[,31]==0,]
negativeData <- na.omit(negativeData)
nbrNegData <- dim(negativeData)[1]
iu<-sample(seq(1,nbrNegData),1000)
restData <- negativeData[-iu,]
subedData <- negativeData[iu,]
dataNumeric <- apply(subedData,2, function(x) as.numeric(paste(x)))
dataFrame <- as.data.frame(dataNumeric)
dataNumeric <- apply(restData,2, function(x) as.numeric(paste(x)))
dataFrameRest <- as.data.frame(dataNumeric)
dataToUse = rbind(positiveData, dataFrame)
newfac<-rep(0,dim(dataToUse)[1])
newfac[dataToUse$Class==0]<-"OK"
newfac[dataToUse$Class==1]<-"Fraud"
newfac<-as.factor(newfac)
dataToUse$Class<-newfac
inTrain<-createDataPartition(dataToUse$Class,p=3/4,list=FALSE)
head(dataToUse)
ctrl<-trainControl(method="repeatedcv",repeats=2,summaryFunction=multiClassSummary)
### PDA
pdafit<-train(Class~.,data=dataToUse[inTrain,],method="pda",tuneLength=15,trControl=ctrl)
plot(pdafit)
pp<-predict(pdafit, newdata=dataToUse[-inTrain,],type="raw")
table(pp,dataToUse$Class[-inTrain])
### LDA
ldafit<-train(Class~.,data=dataToUse[inTrain,],method="lda",tuneLength=15,trControl=ctrl)
lp<-predict(ldafit, newdata=dataToUse[-inTrain,-31],type="raw")
table(lp,dataToUse$Class[-inTrain])
### QDA
qdafit<-train(Class~.,data=dataToUse[inTrain,],method="qda",tuneLength=15,trControl=ctrl)
qp<-predict(qdafit, newdata=dataToUse[-inTrain,-31],type="raw")
table(qp,dataToUse$Class[-inTrain])
### MDA
mdafit<-train(Class~.,data=dataToUse[inTrain,],method="mda",tuneLength=15,trControl=ctrl)
mp<-predict(mdafit, newdata=dataToUse[-inTrain,-31],type="raw")
table(mp,dataToUse$Class[-inTrain])
### NB
NBfit<-train(Class~.,data=dataToUse[inTrain,],method="nb",tuneLength=15,trControl=ctrl)
NBp<-predict(NBfit,newdata=dataToUse[-inTrain,-31],type="raw")
table(NBp,dataToUse$Class[-inTrain])
### cart
cartfit<-train(Class~.,data=dataToUse[inTrain,],method="rpart",tuneLength=15,trControl=ctrl)
cartp<-predict(cartfit, newdata=dataToUse[-inTrain,-31],type="raw")
table(cartp,dataToUse$Class[-inTrain])
### RF
RFfit<-train(Class~.,data=dataToUse[inTrain,],method="ranger",tuneLength=4,trControl=ctrl)
RFp<-predict(RFfit, newdata=dataToUse[-inTrain,],type="raw")
table(RFp,dataToUse$Class[-inTrain])
### regLogistic
RegLogfit<-train(Class~.,data=dataToUse[inTrain,],method="regLogistic",tuneLength=15,trControl=ctrl)
RegLogfitp<-predict(RegLogfit, newdata=dataToUse[-inTrain,],type="raw")
table(RegLogfitp,dataToUse$Class[-inTrain])
|
a6205cb17293408a003cd962194e1eac4018e213 | b0e67da3d682361815916f5c0f312c8875a694db | /cachematrix.R | 5b67936b7922e2023264ea41fcaef95dc0d6fcfd | [] | no_license | junangst/ProgrammingAssignment2 | dcf1cd26761ad782a22d3f54d33d03b40f8de779 | baebf271d1f1af779e30aed026aab3e18cfdb2db | refs/heads/master | 2021-07-09T00:51:20.779386 | 2017-10-05T18:00:50 | 2017-10-05T18:00:50 | 103,785,684 | 0 | 0 | null | 2017-09-16T21:43:25 | 2017-09-16T21:43:25 | null | UTF-8 | R | false | false | 1,163 | r | cachematrix.R | ## The two functions included in this program can be used in
## tandom to cache the inverse of a matrix rather than compute
## it repeatedly throughout a session. The matrix argument for
## the function MakeCacheMatrix must be an invertible square
## matrix. The input argument for the function cacheSolve must
## be an object of type makeCacheMatrix().
## The function makeCacheMatrix creates an object that can
## store the inverse of the matrix argument.
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y){
x <<- y
i <<- NULL
}
get <- function() x
setinv <- function(inv) i <<- inv
getinv <- function() i
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## The function cacheSolve checks if the inverse for the
## has already been calculated. If so, the existing solution
## is returned. If not, the solution is calculated and returned.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinv()
if(!is.null(i)){
message("getting cached inverse matrix")
return(i)
}
data <- x$get()
i <- solve(data,...)
x$setinv(i)
i
}
|
2165f3e69dea97ee7e6f99e2828bb9e4706590d5 | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /tbart/man/euc.dists.Rd | fde2639f226de65216f9c092fc7378d05d56a236 | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 812 | rd | euc.dists.Rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{euc.dists}
\alias{euc.dists}
\title{Euclidean distances from a Spatial* or Spatial*DataFrame object}
\usage{
euc.dists(swdf1, swdf2, scale)
}
\arguments{
\item{swdf1}{- First Spatial*DataFrame object}
\item{swdf2}{- Second Spatial*DataFrame object (if omitted, defaults to the same value as \code{swdf1})}
\item{scale}{- allows re-scaling eg: value of 1000 means distances in km if coordinates of \code{swdf1}/\code{swdf2} in meters.}
}
\value{
Distance matrix (if \code{swdf1} or \code{swdf2} not SpatialPoints*, distances are based on points obtained from \code{coordinates} function)
}
\description{
Euclidean distances from a Spatial* or Spatial*DataFrame object
}
\examples{
data(meuse)
coordinates(meuse) <- ~x+y
euc.dists(meuse,scale=1000)
}
|
50b17a0e116456c9b0206468590faaab1a7ecdc6 | 27d5fc6f8901345834950f354bbdd08a9ed729ba | /ladder_pL.R | 75f47f48acaaf9fb787b1ca29cbf8b8a808631ff | [] | no_license | dtharvey/EquilibriumDiagrams | 56b23a713794b0c749f0e87b939230a950cb8b0e | 4e2c08f1c6e6a609f0c4826824739830357224b8 | refs/heads/master | 2021-01-15T15:31:10.496861 | 2016-06-27T19:32:29 | 2016-06-27T19:32:29 | 51,866,084 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,687 | r | ladder_pL.R | # function to plot ladder diagram for metal-ligand complexes
# pk_list: list of pK values, in order of fewest ligands to most
# ligands; default values are for cadmium-ammine complexes
# ligands: list giving number of ligands added for each pK value;
# defaults to vector of (1, 1, 1, 1) for the four stepwise
# cadmium-amine complexes
# pL_axis: logical; defaults to FALSE but TRUE draws pLigand axis
# pL_limit: limits for pLigand axis; defaults to 0 to 14
# type: the type of ladder diagram; options are "arrow," which is the
# default, or "strip"
# buffer: logical; defaults to FALSE, but TRUE will add buffer regions
# species: option to enter name of weak acid to add as title for plot;
# defaults to NULL, which supresses title
# labels: option to enter vector of labels for legend; defaults to
# NULL, which uses a default legend
# locate: x-axis location of arrow or center of strip; defaults to 2,
# which is practical lower limit; increase in steps of three
# will separate diagrams; practical upper limit is 12
# overlay: logical; defaults to FALSE, but setting to TRUE allows for
# adding a new ladder diagram
library(shape)
ladder_pL = function(pk_list = c(2.55, 2.01, 1.34, 0.84),
ligands = c(1, 1, 1, 1),
pL_axis = FALSE,
pL_limit = c(0, 14),
type = "arrow",
shade = "color",
buffer = FALSE,
species = NULL,
labels = NULL,
locate = 2,
overlay = FALSE){
# initial set-up; creates vector of limits for adding labels;
# creates counter, n, for the number of alpha values;
# sets colors for strip version of ladder diagram
pks = pk_list/ligands
n = length(pks)
limits = c(pL_limit[2], pks, pL_limit[1])
if (shade == "color") {
col.func = colorRampPalette(c("lightyellow2", "steelblue2"))
colors = col.func(n + 1)
} else {
col.func = colorRampPalette(c("gray70", "gray30"))
colors = col.func(n + 1)
}
# creates default set of alpha labels if labels are not provided
if (is.null(labels) == TRUE) {
labels = rep(0, n + 1)
labels[1] = expression(alpha[0])
num.ligands = 0
for (i in 1:(n)) {
num.ligands = num.ligands + ligands[i]
labels[i + 1] = eval(substitute(expression(alpha[I]),
list(I = num.ligands)))
}
}
# routines for plotting the ladder diagrams for each possible set
# of options: new or overlay; arrow or strip; with or without
# pH axis, and with or without buffer regions
if (overlay == FALSE) {if (pL_axis == FALSE) {
pLax = "n"
pLlabel = "pLigand"
pLaxis = ""
} else {
pLax = "s"
pLlabel = ""
pLaxis = "pLigand"
}
plot(NULL, xlim = c(0,14), ylim = c(pL_limit[1],pL_limit[2]),
type = "n", xaxt = "n", yaxt = pLax,
bty = "n", xlab = "", ylab = pLaxis,
xaxs = "i", yaxs = "i")
text(locate + 0.25, pL_limit[2] - (pL_limit[2] - pL_limit[1])/25,
pLlabel, pos = 4)
}
if (type == "arrow") {
Arrows(locate, pL_limit[1], locate, pL_limit[2], lwd = 2,
arr.type = "simple")
segments(x0 = rep(locate - 0.3, n), y0 = pks,
x1 = rep(locate + 0.3, n), y1 = pks, lwd = 2)
} else if (type == "strip") {
for (i in 1:(n + 1)) {
filledrectangle(mid = c(locate, (limits[i] + limits[i + 1])/2),
wx = 0.5, wy = limits[i + 1] - limits[i],
col = colors[i], lcol = "black")
}
} else {
return(paste(type, " is not an option.", sep = ""))
}
for (i in 1:n) {
text(x = locate + 0.25, y = pks[i],
labels = pks[i], pos = 4)
}
for (i in 1:(n + 1)){
text(x = locate - 0.25, y = (limits[i + 1] + limits[i])/2,
labels[i], pos = 2)
}
if (buffer == TRUE) {
if (n == 1) {
segments(x0 = locate, y0 = pks - 1/ligands, x1 = locate,
y1 = pks + 1/ligands, lwd = 5, lend = "butt")
} else { for (i in 1:n) {
if (i %% 2 == 0){
segments(x0 = locate + 0.05, y0 = pks[i] - 1/ligands[i],
x1 = locate + 0.05, y1 = pks[i] + 1/ligands[i],
lwd = 5, lend = "butt")
} else {
segments(x0 = locate - 0.05, y0 = pks[i] - 1/ligands[i],
x1 = locate - 0.05, y1 = pks[i] + 1/ligands[i],
lwd = 5, lend = "butt")
}
}
}
}
if (is.null(species) == FALSE) {
text(x = locate - 1, y = pL_limit[2], species, pos = 2,
srt = 90, col = "darkred")
}
}
# code to test
# ladder_pL(pL_axis = TRUE, type = "arrow", species = "cadmium-ammonia",
# locate = 2, pL_limit = c(0, 5))
# ladder_pL(type = "arrow", locate = 5, overlay = TRUE,
# pk_list = c(6.87, 2.03), ligands = c(3, 1),
# species = "zinc-ammonia", pL_limit = c(0, 5))
# ladder_pL(type = "strip", species = "cadmiium-ammonia", locate = 8,
# pL_limit = c(0, 5), shade = "color", overlay = TRUE)
# ladder_pL(type = "strip", locate = 11, overlay = TRUE,
# pL_limit = c(0, 5), pk_list = c(6.87, 2.03), ligands = c(3, 1),
# species = "zinc-ammonia", shade = "gray")
|
2e4ec6e70f255f593e28d694a3ede115a7e6107b | f9160c8b91e325e98f0b9eb71759781ea8ac40f7 | /GeneSetAnalysis.R | 6e97a09a8c571e3100eebff3dc4af18d2de473c1 | [] | no_license | fcaramia/ExpressionAnalysis | b7e74ae07bc20737eb9ec0851e0519116d44b627 | 39eaaa7889b19520c45da1de9356ba447c6631ef | refs/heads/master | 2020-03-17T20:39:24.301616 | 2018-05-18T07:58:42 | 2018-05-18T07:58:42 | 133,922,021 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,941 | r | GeneSetAnalysis.R | library(GSVA)
library(Biobase)
library(GSEABase)
library(GSA)
library(EGSEA)
library(biomaRt)
GetHumanGeneSymbolsfromENSEMBL <- function(ensembl.symbols = NULL)
{
mart.hs <- useMart("ensembl", "hsapiens_gene_ensembl")
res = getBM(attributes = c("hgnc_symbol", "ensembl_gene_id"),
filters = "ensembl_gene_id", values = ensembl.symbols, mart = mart.hs)
return(res)
}
GetMouseOrtho <- function(human.symbols = NULL)
{
mart.hs <- useMart(biomart = "ensembl", dataset = "hsapiens_gene_ensembl")
ensembl.ids = getBM(attributes = c("hgnc_symbol", "ensembl_gene_id"),
filters = "hgnc_symbol", values = human.symbols, mart = mart.hs)
ensembl.ids = getBM(attributes = c("ensembl_gene_id", "mmusculus_homolog_ensembl_gene"),
filters = "ensembl_gene_id", values = ensembl.ids$ensembl_gene_id, mart = mart.hs)
mart.hs <- useMart("ensembl", "mmusculus_gene_ensembl")
res = getBM(attributes = c("mgi_symbol", "ensembl_gene_id"),
filters = "ensembl_gene_id", values = ensembl.ids$mmusculus_homolog_ensembl_gene, mart = mart.hs)
return(res)
}
GetHumanOrtho <- function(mouse.symbols = NULL)
{
mart.hs <- useMart("ensembl", "mmusculus_gene_ensembl")
ensembl.ids = getBM(attributes = c("mgi_symbol", "ensembl_gene_id"),
filters = "mgi_symbol", values = mouse.symbols, mart = mart.hs)
ensembl.ids = getBM(attributes = c("ensembl_gene_id", "hsapiens_homolog_ensembl_gene"),
filters = "ensembl_gene_id", values = ensembl.ids$ensembl_gene_id, mart = mart.hs)
mart.hs <- useMart(biomart = "ensembl", dataset = "hsapiens_gene_ensembl")
res = getBM(attributes = c("ensembl_gene_id", "hgnc_symbol"),
filters = "ensembl_gene_id", values = ensembl.ids$hsapiens_homolog_ensembl_gene, mart = mart.hs)
return(res)
}
GetEntrezIDs <- function(symbols = NULL)
{
mart.hs <- useMart("ensembl", "hsapiens_gene_ensembl")
res = getBM(attributes = c("hgnc_symbol", "entrezgene"),
filters = "hgnc_symbol", values = symbols, mart = mart.hs)
return(res)
}
GetEntrezIDsMouse <- function(symbols = NULL)
{
mart.hs <- useMart("ENSEMBL_MART_MOUSE", "mc57bl6nj_gene_ensembl")
res = getBM(attributes = c("mgi_symbol", "entrezgene"),
filters = "mgi_symbol", values = symbols, mart = mart.hs)
return(res)
}
GetGeneSymbolsHuman <- function(entrezgenes = NULL)
{
mart.hs <- useMart("ENSEMBL_MART_ENSEMBL", "hsapiens_gene_ensembl",verbose = T,host = 'asia.ensembl.org')
res = getBM(attributes = c("hgnc_symbol", "entrezgene"),
filters = "entrezgene", values = entrezgenes, mart = mart.hs)
return(res)
}
RunGsva <- function(mat, gmt,method="gsva",rnaseq=F,ssgsea.norm=T,abs.ranking=F){
# rnaseq=T # ONLY FOR RAW COUNT INPUT
test = gsva(mat,gmt[[1]],method=method,rnaseq=rnaseq,ssgsea.norm=ssgsea.norm,abs.ranking=abs.ranking)
#save_gsva(test,out,method)
}
SaveGsva <- function(gsva_out, file.name,method){
if(method=="gsva"){
es.obs = as.data.frame(gsva_out$es.obs)
}
else if(method=="ssgsea"){
es.obs = as.data.frame(gsva_out)
}
es.obs$GeneSet = rownames(es.obs)
es.obs = es.obs[,c(ncol(es.obs),1:(ncol(es.obs)-1))]
write.table(es.obs,file=file.name,sep=",",quote=F,col.names=T,row.names=F)
}
GetDefaultGMT <- function(gmt)
{
if(gmt == 'C7')res = GSA.read.gmt('~/Documents/Work/analysis/GeneSets/c7.all.v6.0.symbols.gmt')
if(gmt == 'C5')res = GSA.read.gmt('~/Documents/Work/analysis/GeneSets/c5.all.v6.0.symbols.gmt')
if(gmt == 'H')res = GSA.read.gmt('~/Documents/Work/analysis/GeneSets/h.all.v6.0.symbols.gmt')
names(res[[1]]) = res[[2]]
return(res)
}
BuildIndexes <- function(genes,spec='human',msigdb.gsets='all')
{
gs.annots = buildIdx(entrezIDs = genes, species = spec,
msigdb.gsets = msigdb.gsets, go.part = TRUE)
return(gs.annots)
}
|
b0885e3d047e72ded156e1ff4d3b84e5fe3c69db | b1754ec13322df2d42761d961af0dabca4517ee5 | /Code/12outputSA.R | da61db698ea31bbca9958df99e0807a36dbbaabf | [] | no_license | StephenStyles/StochasticApproximation | 44040c5676ebc0ad7016fc689f578ec993286bdb | 9ce8fe2b0fde38737b1c8b6e1c9fe35cc8656f3c | refs/heads/main | 2023-03-24T08:36:15.296573 | 2021-03-11T05:34:56 | 2021-03-11T05:34:56 | 305,183,444 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 29,788 | r | 12outputSA.R | setwd("C:/Users/sjsty/Desktop/Masters/Algorithms/Simplified Problem")
# Leaky ReLU activation: x where x >= 0, eps * x where x < 0.
#
# Vectorized over `x` (the original scalar `if` errors on vector input in
# R >= 4.2; scalar behaviour is unchanged), so a whole layer's
# pre-activations can be transformed in one call.
#
# Args:
#   eps: slope applied to negative inputs (e.g. 0.0001).
#   x:   numeric scalar or vector of pre-activations.
# Returns: numeric of the same length as `x`.
leakyrelu <- function(eps, x) {
  ifelse(x < 0, eps * x, x)
}
# Logistic sigmoid of the sum of the inputs: 1 / (1 + exp(-sum(x))).
#
# Uses stats::plogis, which is numerically stable: the original form
# exp(y) / (1 + exp(y)) overflows to NaN once sum(x) exceeds ~710,
# whereas plogis correctly saturates at 1 (and at 0 for large negative
# sums).
#
# Args:
#   x: numeric vector; its elements are summed before the sigmoid.
# Returns: a single value in [0, 1].
sigmoid <- function(x) {
  plogis(sum(x))
}
# Exponential-decay sampling score: score(x) = exp(-x), elementwise.
#
# Used below to weight stored training examples by their error before
# normalising into sampling probabilities (smaller error => larger score).
#
# Bug fixed: the original looped over `x` but assigned the *whole* vector
# exp(-x) into each single element `rank[i]`, which errors for any input
# of length > 1 (it only ran because callers passed scalars via sapply).
# The vectorized form is correct for scalars and vectors alike, and no
# longer shadows base::rank with a local name.
#
# Args:
#   x: numeric scalar or vector of (non-negative) error values.
# Returns: numeric of the same length as `x`.
score <- function(x) {
  exp(-x)
}
# Single Robbins-Monro stochastic-approximation step.
#
# Moves the weight vector `w` toward the solution of A %*% w = b using a
# decaying gain of (n + i)^(-chi), where `i` is the within-batch step,
# `n` an iteration offset, and `chi` the decay exponent.
# NOTE: this masks the stats::update generic within this script.
#
# Returns the updated weights (as a one-column matrix, because A %*% w
# produces a matrix).
update <- function(A, b, w, i, n, chi) {
  gain <- (n + i)^(-chi)
  residual <- b - A %*% w
  w + gain * residual
}
# Mean squared error between two equal-length numeric vectors.
#
# Replaces the original element-by-element loop (which grew `temp` one
# entry at a time) with the equivalent vectorized expression; results
# are identical for equal-length numeric inputs.
#
# Args:
#   x, y: numeric vectors of the same length.
# Returns: mean((x - y)^2), a single number.
mse <- function(x, y) {
  mean((x - y)^2)
}
########################################################################################################
# SKLearn Neural Network starting points
########################################################################################################
# Layer 1 (4 inputs -> 20 hidden units): the bias row is stacked on top
# of the coefficient rows, then the resulting data frame is flattened
# row by row into the single weight vector w1 used by the forward pass
# below (20 biases followed by the input weights).
# NOTE(review): column/row layout of the CSVs is assumed from how w1df
# is indexed here — confirm against the exporting Python script.
bias1 = t(read.table("12firstlayerint40.csv",sep = ","))
weights1 = read.table("12firstlayercoef40.csv",sep = ",")
w1df = rbind(bias1, weights1)
w1 = as.numeric(c(w1df[1,],w1df[2,],w1df[3,],w1df[4,],w1df[5,]))
# Layer 2 (20 hidden units -> 12 outputs): same packing — bias row first,
# then one row per hidden unit, flattened into w2.
bias2 = t(read.table("12secondlayerint40.csv",sep = ","))
weights2 = read.table("12secondlayercoef40.csv",sep = ",")
w2df = rbind(bias2, weights2)
w2 = as.numeric(c(w2df[1,],w2df[2,],w2df[3,],w2df[4,],w2df[5,],
                  w2df[6,],w2df[7,],w2df[8,],w2df[9,],w2df[10,],
                  w2df[11,],w2df[12,],w2df[13,],w2df[14,],w2df[15,],
                  w2df[16,],w2df[17,],w2df[18,],w2df[19,],w2df[20,],
                  w2df[21,]))
##########################################################################################################
# FULL NN Starting points
##########################################################################################################
# Alternative initialisation: weights exported from the fully trained
# network ("final12..." files), packed exactly like the SKLearn section.
# NOTE(review): executing this section overwrites the w1/w2 loaded above —
# presumably only one of the two initialisation sections is meant to be
# run per session; confirm which starting point is intended.
bias1 = t(read.table("final12firstlayerint.csv",sep = ","))
weights1 = read.table("final12firstlayercoef.csv",sep = ",")
w1df = rbind(bias1, weights1)
w1 = as.numeric(c(w1df[1,],w1df[2,],w1df[3,],w1df[4,],w1df[5,]))
bias2 = t(read.table("final12secondlayerint.csv",sep = ","))
weights2 = read.table("final12secondlayercoef.csv",sep = ",")
w2df = rbind(bias2, weights2)
w2 = as.numeric(c(w2df[1,],w2df[2,],w2df[3,],w2df[4,],w2df[5,],
                  w2df[6,],w2df[7,],w2df[8,],w2df[9,],w2df[10,],
                  w2df[11,],w2df[12,],w2df[13,],w2df[14,],w2df[15,],
                  w2df[16,],w2df[17,],w2df[18,],w2df[19,],w2df[20,],
                  w2df[21,]))
##########################################################################################################
# Initial Data to estimate
##########################################################################################################
# Load the SA training set (tab-separated; columns 1:4 are the inputs,
# columns 5:16 the 12-way one-hot targets, judging by how they are
# indexed in the loops below) and split it 75% train / 25% test.
# NOTE(review): no set.seed() before sample(), so the split is not
# reproducible between runs.
SAdata = read.table("12outputtraindata.txt", sep = "\t", header= TRUE)
row.names(SAdata) = NULL
smp_size <- floor(0.75 * nrow(SAdata))
train_ind <- sample(seq_len(nrow(SAdata)), size = smp_size)
traindata = SAdata[train_ind,]
row.names(traindata) = NULL
testdata = SAdata[-train_ind,]
row.names(testdata) = NULL
# Slope of the leaky-ReLU activation used throughout the forward passes.
eps = 0.0001
###############################################################################################################################################
# Finding initial accuracy
###############################################################################################################################################
# Evaluate the starting weights on a held-out validation file: run every
# row through the 4 -> 20 -> 12 network and record the 12 output
# estimates per sample.
validationdata = read.table("12outputNNdata.txt", sep = "\t", header= TRUE)
estimates = data.frame(yhat1=numeric(),yhat2=numeric(),yhat3=numeric(),yhat4=numeric(),yhat5=numeric(),yhat6=numeric(),
                       yhat7=numeric(),yhat8=numeric(),yhat9=numeric(),yhat10=numeric(),yhat11=numeric(),yhat12=numeric())
for (s in 1:6000){
  # Layer 1: block design matrix [I | x1*I | ... | x4*I] times the packed
  # weight vector w1 gives the 20 hidden pre-activations.
  x1 = as.numeric(validationdata[s,1:4])
  Xm = cbind(1*diag(20),x1[1]*diag(20),x1[2]*diag(20),x1[3]*diag(20),x1[4]*diag(20))
  output1 = Xm %*% w1
  x2 = c(rep(0,20))
  for(j in 1:20){
    x2[j] = leakyrelu(eps,output1[j])
  }
  # Layer 2: same block construction over the 20 hidden activations.
  Xm = cbind(1*diag(12),x2[1]*diag(12),x2[2]*diag(12),x2[3]*diag(12),x2[4]*diag(12),
             x2[5]*diag(12),x2[6]*diag(12),x2[7]*diag(12),x2[8]*diag(12),
             x2[9]*diag(12),x2[10]*diag(12),x2[11]*diag(12),x2[12]*diag(12),
             x2[13]*diag(12),x2[14]*diag(12),x2[15]*diag(12),x2[16]*diag(12),
             x2[17]*diag(12),x2[18]*diag(12),x2[19]*diag(12),x2[20]*diag(12))
  o2 = Xm %*% w2
  for(j in 1:12){
    estimates[s,j] = leakyrelu(eps,o2[j])
  }
}
validationdata_OG=cbind(validationdata,estimates)
# Per-sample scoring: after the cbind, columns 5:16 hold the one-hot
# targets and columns 17:28 the network estimates. `correct` is 1 when
# the argmax of the estimates matches the true class; `error` is the
# MSE between estimates and targets.
correctclass = data.frame(correct = numeric(), error = numeric())
for(s in 1:6000){
  y = as.numeric(validationdata_OG[s,17:28])
  x = as.numeric(validationdata_OG[s,5:16])
  temp = rep(0,12)
  temp[which.max(y)] = 1
  correctclass[s,1] = t(x) %*% temp
  correctclass[s,2] = mse(x,y)
}
validationdata_OG=cbind(validationdata,estimates,correctclass)
acc = c(sum(correctclass$correct))
meanerror=c(sum(correctclass$error))
# NOTE(review): the computed acc/meanerror above are immediately
# overwritten with hard-coded values — presumably baseline numbers from
# a previous run, so the 6000-sample evaluation can be skipped; confirm
# these constants are still valid for the current weight files.
acc = c(5880)
meanerror=c(2180.471)
###################################################################################################################################
# Starting SA
###################################################################################################################################
# Running-average copies of the weights (Polyak-style averaging; the
# per-step averaging updates inside the main loop are currently
# commented out).
w1_avg = w1
w2_avg = w2
# Weight-history logs: one column appended per checkpoint, seeded with
# the starting weights.
weight1_log = matrix(NA, nrow=length(w1), ncol=0)
weight2_log = matrix(NA, nrow=length(w2), ncol=0)
weight1_log = cbind(weight1_log,w1)
weight2_log = cbind(weight2_log,w2)
# Per-outer-iteration batch sizes: n rows of traindata and m = n %/% 3
# rows of testdata are consumed each pass of the main loop.
n = 3000
m = n%/%3
# Offset for resumed runs: epoc shifts the step counter inside the gain
# sequence so the learning rate keeps decaying across restarts.
runthrough = 0
epoc=runthrough*250000
for(t in 1:250){
a = n*(t-1)+1
b = n*t
c = m*(t-1)+1
d = m*t
data1 = traindata[a:b,]
data2 = testdata[c:d,]
inputs1 = data.frame(x1_1=numeric(),x1_2=numeric(),x1_3=numeric(),x1_4=numeric())
outputs1 = data.frame(h1=numeric(),h2=numeric(),h3=numeric(),h4=numeric(),h5=numeric(),
h6=numeric(),h7=numeric(),h8=numeric(),h9=numeric(),h10=numeric(),
h11=numeric(),h12=numeric(),h13=numeric(),h14=numeric(),h15=numeric(),
h16=numeric(),h17=numeric(),h18=numeric(),h19=numeric(),h20=numeric())
inputs2 = data.frame(x2_1=numeric(),x2_2=numeric(),x2_3=numeric(),x2_4=numeric(),x2_5=numeric(),
x2_6=numeric(),x2_7=numeric(),x2_8=numeric(),x2_9=numeric(),x2_10=numeric(),
x2_11=numeric(),x2_12=numeric(),x2_13=numeric(),x2_14=numeric(),x2_15=numeric(),
x2_16=numeric(),x2_17=numeric(),x2_18=numeric(),x2_19=numeric(),x2_20=numeric())
outputs2 = data.frame(o1=numeric(),o2=numeric(),o3=numeric(),o4=numeric(),o5=numeric(),o6=numeric(),
o7=numeric(),o8=numeric(),o9=numeric(),o10=numeric(),o11=numeric(),o12=numeric())
finalest = data.frame(yhat1=numeric(),yhat2=numeric(),yhat3=numeric(),yhat4=numeric(),yhat5=numeric(),yhat6=numeric(),
yhat7=numeric(),yhat8=numeric(),yhat9=numeric(),yhat10=numeric(),yhat11=numeric(),yhat12=numeric())
trueval = data.frame(y1=numeric(),y2=numeric(),y3=numeric(),y4=numeric(),y5=numeric(),y6=numeric(),
y7=numeric(),y8=numeric(),y9=numeric(),y10=numeric(),y11=numeric(),y12=numeric())
for(i in 1:n){
inputs1[i,] = data1[i,1:4]
x1 = as.numeric(inputs1[i,1:4])
Xm = cbind(1*diag(20),x1[1]*diag(20),x1[2]*diag(20),x1[3]*diag(20),x1[4]*diag(20))
outputs1[i,] = Xm %*% w1
for(j in 1:20){
inputs2[i,j] = leakyrelu(eps,outputs1[i,j])
}
x2 = as.numeric(inputs2[i,1:20])
Xm = cbind(1*diag(12),x2[1]*diag(12),x2[2]*diag(12),x2[3]*diag(12),x2[4]*diag(12),
x2[5]*diag(12),x2[6]*diag(12),x2[7]*diag(12),x2[8]*diag(12),
x2[9]*diag(12),x2[10]*diag(12),x2[11]*diag(12),x2[12]*diag(12),
x2[13]*diag(12),x2[14]*diag(12),x2[15]*diag(12),x2[16]*diag(12),
x2[17]*diag(12),x2[18]*diag(12),x2[19]*diag(12),x2[20]*diag(12))
outputs2[i,] = Xm %*% w2
for(j in 1:12){
finalest[i,j] = leakyrelu(eps,outputs2[i,j])
}
trueval[i,] = data1[i,5:16]
}
approx = cbind(inputs1,outputs1,inputs2,outputs2,finalest,trueval)
error = data.frame(err = numeric())
for(i in 1:n){
x = as.numeric(approx[i,57:68])
x = x/sum(x)
y = as.numeric(approx[i,69:80])
error[i,1] = mse(x,y)
}
approx = cbind(approx,error)
correctclass = data.frame(correct = numeric())
for(i in 1:n){
y = as.numeric(approx[i,57:68])
x = as.numeric(approx[i,69:80])
if(max(y)!=0){
for( j in 1:12){
if(y[j] == max(y)){
y[j]=1
} else{
y[j]=0
}
}
}
correctclass[i,1] = t(x) %*% y
}
approx = cbind(approx,correctclass)
truevalues = approx[which(approx$correct == 1),]
truey1 = truevalues[which(truevalues$y1 == 1),]
truey2 = truevalues[which(truevalues$y2 == 1),]
truey3 = truevalues[which(truevalues$y3 == 1),]
truey4 = truevalues[which(truevalues$y4 == 1),]
truey5 = truevalues[which(truevalues$y5 == 1),]
truey6 = truevalues[which(truevalues$y6 == 1),]
truey7 = truevalues[which(truevalues$y7 == 1),]
truey8 = truevalues[which(truevalues$y8 == 1),]
truey9 = truevalues[which(truevalues$y9 == 1),]
truey10 = truevalues[which(truevalues$y10 == 1),]
truey11 = truevalues[which(truevalues$y11 == 1),]
truey12 = truevalues[which(truevalues$y12 == 1),]
sy1 = sapply(truey1[,81],score);sy1 = sy1/sum(sy1)
sy2 = sapply(truey2[,81],score);sy2 = sy2/sum(sy2)
sy3 = sapply(truey3[,81],score);sy3 = sy3/sum(sy3)
sy4 = sapply(truey4[,81],score);sy4 = sy4/sum(sy4)
sy5 = sapply(truey5[,81],score);sy5 = sy5/sum(sy5)
sy6 = sapply(truey6[,81],score);sy6 = sy6/sum(sy6)
sy7 = sapply(truey7[,81],score);sy7 = sy7/sum(sy7)
sy8 = sapply(truey8[,81],score);sy8 = sy8/sum(sy8)
sy9 = sapply(truey9[,81],score);sy9 = sy9/sum(sy9)
sy10 = sapply(truey10[,81],score);sy10 = sy10/sum(sy10)
sy11 = sapply(truey11[,81],score);sy11 = sy11/sum(sy11)
sy12 = sapply(truey12[,81],score);sy12 = sy12/sum(sy12)
for(i in 1:m){
x = as.numeric(data2[i,1:4])
data = NULL
y2 = 10*as.numeric(data2[i,5:16])
if(y2[1] == 10){
data = truey1
sc = sy1
} else if(y2[2] == 10){
data = truey2
sc = sy2
} else if(y2[3] == 10){
data = truey3
sc = sy3
} else if(y2[4] == 10){
data = truey4
sc = sy4
} else if(y2[5] == 10){
data = truey5
sc = sy5
} else if(y2[6] == 10){
data = truey6
sc = sy6
} else if(y2[7] == 10){
data = truey7
sc = sy7
} else if(y2[8] == 10){
data = truey8
sc = sy8
} else if(y2[9] == 10){
data = truey9
sc = sy9
} else if(y2[10] == 10){
data = truey10
sc = sy10
} else if(y2[11] == 10){
data = truey11
sc = sy11
} else if(y2[12] == 10){
data = truey12
sc = sy12
}
p = sample(1:dim(data)[1],size=1,prob=sc)
X1 = cbind(1*diag(20),x[1]*diag(20),x[2]*diag(20),x[3]*diag(20),x[4]*diag(20))
y1 = as.numeric(data[p,5:24])
x2 = as.numeric(data[p,25:44])
X2 = cbind(1*diag(12),x2[1]*diag(12),x2[2]*diag(12),x2[3]*diag(12),x2[4]*diag(12),
x2[5]*diag(12),x2[6]*diag(12),x2[7]*diag(12),x2[8]*diag(12),
x2[9]*diag(12),x2[10]*diag(12),x2[11]*diag(12),x2[12]*diag(12),
x2[13]*diag(12),x2[14]*diag(12),x2[15]*diag(12),x2[16]*diag(12),
x2[17]*diag(12),x2[18]*diag(12),x2[19]*diag(12),x2[20]*diag(12))
if(i>=1){
A1 = t(X1)%*%X1
A2 = t(X2)%*%X2
B1 = t(X1)%*%y1
B2 = t(X2)%*%y2
}
#if(i>2){
# w1_avg = w1/(c+epoc-1+i)+ (c+epoc+i-2)/(c+epoc-1+i)*w1_avg
# w2_avg = w2/(c+epoc-1+i)+ (c+epoc+i-2)/(c+epoc-1+i)*w2_avg
#}
#if( i>= 2){
# A1 = t(X1)%*%X1
# A1 = t(X1)%*%X1/(c-1+i) + (c-1+i-1)/(c-1+i)*A1
# A2 = t(X2)%*%X2/(c-1+i) + (c-1+i-1)/(c-1+i)*A2
# B1 = t(X1)%*%y1/(c-1+i) + (c-1+i-1)/(c-1+i)*B1
# B1 = t(X1)%*%y1
# B2 = t(X2)%*%y2/(c-1+i) + (c-1+i-1)/(c-1+i)*B2
#}
w1 = update(A1, B1,w1,i,25000+c+epoc,0.75)
w2 = update(A2, B2,w2,i,25000+c+epoc,0.55)
if(i==1000){
step = t + runthrough*250+1
tempvalidationdata = validationdata
estimates = data.frame(yhat1=numeric(),yhat2=numeric(),yhat3=numeric(),yhat4=numeric(),yhat5=numeric(),yhat6=numeric(),
yhat7=numeric(),yhat8=numeric(),yhat9=numeric(),yhat10=numeric(),yhat11=numeric(),yhat12=numeric())
#estimates_avg = data.frame(yhat1=numeric(),yhat2=numeric(),yhat3=numeric(),yhat4=numeric(),yhat5=numeric(),yhat6=numeric(),
# yhat7=numeric(),yhat8=numeric(),yhat9=numeric(),yhat10=numeric(),yhat11=numeric(),yhat12=numeric())
for (s in 1:6000){
x1 = as.numeric(tempvalidationdata[s,1:4])
Xm = cbind(1*diag(20),x1[1]*diag(20),x1[2]*diag(20),x1[3]*diag(20),x1[4]*diag(20))
output1 = Xm %*% w1
x2 = c(rep(0,20))
for(j in 1:20){
x2[j] = leakyrelu(eps,output1[j])
}
Xm = cbind(1*diag(12),x2[1]*diag(12),x2[2]*diag(12),x2[3]*diag(12),x2[4]*diag(12),
x2[5]*diag(12),x2[6]*diag(12),x2[7]*diag(12),x2[8]*diag(12),
x2[9]*diag(12),x2[10]*diag(12),x2[11]*diag(12),x2[12]*diag(12),
x2[13]*diag(12),x2[14]*diag(12),x2[15]*diag(12),x2[16]*diag(12),
x2[17]*diag(12),x2[18]*diag(12),x2[19]*diag(12),x2[20]*diag(12))
o2 = Xm %*% w2
for(j in 1:12){
estimates[s,j] = leakyrelu(eps,o2[j])
}
}
#for (s in 1:6000){
# x1 = as.numeric(tempvalidationdata[s,1:4])
# Xm = cbind(1*diag(20),x1[1]*diag(20),x1[2]*diag(20),x1[3]*diag(20),x1[4]*diag(20))
# output1 = Xm %*% w1_avg
# x2 = c(rep(0,20))
# for(j in 1:20){
# x2[j] = leakyrelu(eps,output1[j])
# }
# Xm = cbind(1*diag(12),x2[1]*diag(12),x2[2]*diag(12),x2[3]*diag(12),x2[4]*diag(12),
# x2[5]*diag(12),x2[6]*diag(12),x2[7]*diag(12),x2[8]*diag(12),
# x2[9]*diag(12),x2[10]*diag(12),x2[11]*diag(12),x2[12]*diag(12),
# x2[13]*diag(12),x2[14]*diag(12),x2[15]*diag(12),x2[16]*diag(12),
# x2[17]*diag(12),x2[18]*diag(12),x2[19]*diag(12),x2[20]*diag(12))
# o2 = Xm %*% w2_avg
# for(j in 1:12){
# estimates_avg[s,j] = leakyrelu(eps,o2[j])
# }
#
#}
tempvalidationdata=cbind(tempvalidationdata,estimates)
correctclass = data.frame(correct = numeric())
for(s in 1:6000){
#y_avg = as.numeric(tempvalidationdata[s,29:40])
y = as.numeric(tempvalidationdata[s,17:28])
x = as.numeric(tempvalidationdata[s,5:16])
temp = rep(0,12)
temp[which.max(y)] = 1
correctclass[s,1] = t(x) %*% temp
#temp2 = rep(0,12)
#temp2[which.max(y_avg)]=1
#correctclass[s,3] = t(x) %*% temp2
#correctclass[s,4] = mse(x,y_avg)
}
acc = c(acc,sum(correctclass$correct))
#meanerror_avg = c(meanerror_avg, sum(correctclass$error_avg))
#acc_avg = c(acc_avg,sum(correctclass$correct_avg))
weight1_log = cbind(weight1_log,w1)
weight2_log = cbind(weight2_log,w2)
plot(acc/6000,type = "l", col = "blue", xlab = "1000 Steps", ylab = "Accuracy")
lines(1:step,NN_ACC[1:step]/6000, type = "l", col = "red", lwd = 2)
#plot(meanerror,type = "l", col = "blue", xlab = "1000 Steps", ylab = "MSE")
#plot(acc_avg/6000,type = "l", col = "blue", xlab = "1000 Steps", ylab = "Accuracy")
#plot(meanerror_avg,type = "l", col = "red", xlab = "1000 Steps", ylab = "MSE")
}
}
}
######################################################################################################################
# Plotting the weights changing over time
######################################################################################################################
# ---- Plot the evolution of the weights over training ----
library(scales)
# One rainbow colour per weight; each row of weight1_log is one weight's
# trajectory across the logged checkpoints (one column per checkpoint).
col_pal = rainbow(n=nrow(weight1_log))
plot(c(weight1_log), ylim=range(weight1_log), type='n', xlim=c(1,ncol(weight1_log)), main = "Weight Log Layer 1", xlab = "1000 Steps", ylab = "Weights") # type 'n' plots nothing
for(i in 1:length(w1)){
points(weight1_log[i,], type='l', pch=19, col=alpha(col_pal[i], 0.5)) # alpha f'n sets colour opacity
}
# Same plot for the second layer's weight log.
col_pal = rainbow(n=nrow(weight2_log))
plot(c(weight2_log), ylim=range(weight2_log), type='n', xlim=c(1,ncol(weight2_log)), main = "Weight Log Layer 2", xlab = "1000 Steps", ylab = "Weights") # type 'n' plots nothing
for(i in 1:length(w2)){
points(weight2_log[i,], type='l', pch=19, col=alpha(col_pal[i], 0.5)) # alpha f'n sets colour opacity
}
# Accuracy curve (acc holds correct counts out of 6000 validation rows)
# and a dump of both weight logs for later analysis.
plot(acc/6000,type = "l", col = "blue", xlab = "1000 Steps", ylab = "Accuracy")
write.table(weight1_log, "weight1log.txt", sep="\t")
write.table(weight2_log, "weight2log.txt", sep="\t")
############################################################################################################################################################################################
# changing to the shifted data
#############################################################################################################################################################################################
# ---- Switch to the shifted data set ----
# Start from the weights learned on the original data and track them in
# fresh logs (one column appended per evaluation checkpoint).
new_w1 = w1
new_w2 = w2
new_weight1_log = matrix(NA, nrow=length(new_w1), ncol=0)
new_weight2_log = matrix(NA, nrow=length(new_w2), ncol=0)
new_weight1_log = cbind(new_weight1_log,new_w1)
new_weight2_log = cbind(new_weight2_log,new_w2)
# Load the shifted training data, drop incomplete rows and make a 75/25
# train/test split.
SAdata = read.table("shiftedtraindata.txt", sep = "\t", header= TRUE)
dim(SAdata)
SAdata = SAdata[complete.cases(SAdata),]
dim(SAdata)
row.names(SAdata) = NULL
smp_size <- floor(0.75 * nrow(SAdata))
train_ind <- sample(seq_len(nrow(SAdata)), size = smp_size)
traindata = SAdata[train_ind,]
row.names(traindata) = NULL
testdata = SAdata[-train_ind,]
row.names(testdata) = NULL
eps = 0.0001
# Baseline evaluation of the (not yet fine-tuned) weights on the shifted
# validation set: forward pass through both layers for every row.
# Columns 1:4 are the inputs, 5:16 the one-hot class labels.
validationdata = read.table("shiftedvalidationset.txt", sep = "\t", header= TRUE)
validationdata = validationdata[complete.cases(validationdata),]
val_len = dim(validationdata)[1]
estimates = data.frame(yhat1=numeric(),yhat2=numeric(),yhat3=numeric(),yhat4=numeric(),yhat5=numeric(),yhat6=numeric(),
yhat7=numeric(),yhat8=numeric(),yhat9=numeric(),yhat10=numeric(),yhat11=numeric(),yhat12=numeric())
dim(validationdata)
for (s in 1:val_len){
x1 = as.numeric(validationdata[s,1:4])
# Layer-1 design matrix: [I | x1[1]*I | ... | x1[4]*I] with 20x20 blocks.
Xm = cbind(1*diag(20),x1[1]*diag(20),x1[2]*diag(20),x1[3]*diag(20),x1[4]*diag(20))
output1 = Xm %*% new_w1
x2 = c(rep(0,20))
for(j in 1:20){
x2[j] = leakyrelu(eps,output1[j])
}
# Layer-2 design matrix built the same way from the hidden activations.
Xm = cbind(1*diag(12),x2[1]*diag(12),x2[2]*diag(12),x2[3]*diag(12),x2[4]*diag(12),
x2[5]*diag(12),x2[6]*diag(12),x2[7]*diag(12),x2[8]*diag(12),
x2[9]*diag(12),x2[10]*diag(12),x2[11]*diag(12),x2[12]*diag(12),
x2[13]*diag(12),x2[14]*diag(12),x2[15]*diag(12),x2[16]*diag(12),
x2[17]*diag(12),x2[18]*diag(12),x2[19]*diag(12),x2[20]*diag(12))
o2 = Xm %*% new_w2
for(j in 1:12){
estimates[s,j] = leakyrelu(eps,o2[j])
}
}
validationdata=cbind(validationdata,estimates)
# Accuracy (arg-max prediction vs one-hot label) and per-row MSE.
correctclass = data.frame(correct = numeric(), error= numeric())
for(s in 1:val_len){
y = as.numeric(validationdata[s,17:28])
x = as.numeric(validationdata[s,5:16])
temp = rep(0,12)
temp[which.max(y)] = 1
correctclass[s,1] = t(x) %*% temp
correctclass[s,2] = mse(x,y)
}
validationdata=cbind(validationdata,estimates,correctclass)
new_acc = c(sum(correctclass$correct))
new_acc
# Chunk sizes for the fine-tuning loop below: n training rows and
# m = n/3 test rows are consumed per outer iteration.
n = 3000
m = n%/%3
########################################################################
# Fine-tune the pretrained weights (new_w1/new_w2) on the shifted data.
# 250 chunks are processed sequentially; for every test row a training
# example is resampled from the correctly classified rows of the same
# class (weighted by score() of its stored error), and both layers get a
# least-squares style update().  Relies on helpers defined earlier in the
# file: leakyrelu(), mse(), score(), update().
########################################################################

# Layer-1 design matrix for a length-4 input: [I | x[1]*I | ... | x[4]*I]
# with 20x20 identity blocks (bias block first).  Replaces six hand-written
# copies of the same cbind(diag(...)) construction in the original.
layer1_design <- function(x) {
  do.call(cbind, c(list(diag(20)), lapply(1:4, function(j) x[j] * diag(20))))
}

# Layer-2 design matrix for a length-20 hidden activation vector, built
# from 12x12 identity blocks (bias block first).
layer2_design <- function(x2) {
  do.call(cbind, c(list(diag(12)), lapply(1:20, function(j) x2[j] * diag(12))))
}

for (t in 1:250) {
  a <- n * (t - 1) + 1   # training chunk occupies rows a:b
  b <- n * t
  c <- m * (t - 1) + 1   # test chunk occupies rows c:d; NOTE: this value
  d <- m * t             # shadows base::c, but c(...) calls still resolve
  data1 <- traindata[a:b, ]
  data2 <- testdata[c:d, ]

  # ---- Forward pass over the training chunk with the current weights ----
  inputs1 <- data1[, 1:4]
  names(inputs1) <- paste0("x1_", 1:4)
  outputs1 <- as.data.frame(matrix(NA_real_, n, 20))
  names(outputs1) <- paste0("h", 1:20)
  inputs2 <- as.data.frame(matrix(NA_real_, n, 20))
  names(inputs2) <- paste0("x2_", 1:20)
  outputs2 <- as.data.frame(matrix(NA_real_, n, 12))
  names(outputs2) <- paste0("o", 1:12)
  finalest <- as.data.frame(matrix(NA_real_, n, 12))
  names(finalest) <- paste0("yhat", 1:12)
  trueval <- data1[, 5:16]
  names(trueval) <- paste0("y", 1:12)
  for (i in 1:n) {
    x1 <- as.numeric(inputs1[i, ])
    outputs1[i, ] <- layer1_design(x1) %*% new_w1
    inputs2[i, ] <- sapply(1:20, function(j) leakyrelu(eps, outputs1[i, j]))
    x2 <- as.numeric(inputs2[i, ])
    outputs2[i, ] <- layer2_design(x2) %*% new_w2
    finalest[i, ] <- sapply(1:12, function(j) leakyrelu(eps, outputs2[i, j]))
  }

  # Per-row MSE between the normalised prediction and the one-hot label;
  # stored as column 81 of `approx` and consumed by score() below.
  err <- numeric(n)
  for (i in 1:n) {
    yhat <- as.numeric(finalest[i, ])
    err[i] <- mse(yhat / sum(yhat), as.numeric(trueval[i, ]))
  }
  # Column layout (used positionally below): 1:4 inputs1, 5:24 outputs1,
  # 25:44 inputs2, 45:56 outputs2, 57:68 finalest, 69:80 trueval, 81 err.
  approx <- cbind(inputs1, outputs1, inputs2, outputs2, finalest, trueval, err = err)

  # Flag rows whose arg-max prediction matches the one-hot label.
  correct <- numeric(n)
  for (i in 1:n) {
    pred <- as.numeric(approx[i, 57:68])
    truth <- as.numeric(approx[i, 69:80])
    onehot <- rep(0, 12)
    onehot[which.max(pred)] <- 1
    correct[i] <- sum(truth * onehot)
  }
  approx <- cbind(approx, correct = correct, row.names = NULL)
  truevalues <- approx[which(approx$correct == 1), ]

  # Correctly classified rows split by class, plus the per-class sampling
  # weights derived from score() of the stored error (column 81).
  # Replaces the original twelve numbered truey*/sy* variables.
  truelist <- lapply(1:12, function(k) truevalues[which(truevalues[[paste0("y", k)]] == 1), ])
  sclist <- lapply(truelist, function(tv) {
    s <- sapply(tv[, 81], score)
    s / sum(s)
  })

  # ---- Stochastic weight updates driven by the test chunk ----
  for (i in 1:m) {
    x <- as.numeric(data2[i, 1:4])
    y2 <- 10 * as.numeric(data2[i, 5:16])
    k <- which(y2 == 10)[1]          # index of the one-hot class
    data <- truelist[[k]]            # replaces the original 12-branch if/else chain
    sc <- sclist[[k]]
    p <- sample(1:dim(data)[1], size = 1, prob = sc)
    X1 <- layer1_design(x)
    y1 <- as.numeric(data[p, 5:24])  # sampled layer-1 targets (outputs1 block)
    x2 <- as.numeric(data[p, 25:44]) # sampled layer-2 inputs (inputs2 block)
    X2 <- layer2_design(x2)
    A1 <- t(X1) %*% X1
    A2 <- t(X2) %*% X2
    B1 <- t(X1) %*% y1
    B2 <- t(X2) %*% y2
    new_w1 <- update(A1, B1, new_w1, i, 25000 + c, 0.75)
    new_w2 <- update(A2, B2, new_w2, i, 25000 + c, 0.55)

    # ---- Periodic evaluation on the shifted validation set ----
    if (i %% 1000 == 0) {
      validationdata <- read.table("shiftedvalidationset.txt", sep = "\t", header = TRUE)
      validationdata <- validationdata[complete.cases(validationdata), ]
      val_len <- nrow(validationdata)
      estimates <- as.data.frame(matrix(NA_real_, val_len, 12))
      names(estimates) <- paste0("yhat", 1:12)
      for (s in 1:val_len) {
        xv <- as.numeric(validationdata[s, 1:4])
        o1 <- layer1_design(xv) %*% new_w1
        hv <- sapply(1:20, function(j) leakyrelu(eps, o1[j]))
        o2 <- layer2_design(hv) %*% new_w2
        estimates[s, ] <- sapply(1:12, function(j) leakyrelu(eps, o2[j]))
      }
      validationdata <- cbind(validationdata, estimates)
      correctclass <- data.frame(correct = numeric())
      for (s in 1:val_len) {
        yv <- as.numeric(validationdata[s, 17:28])   # predictions
        xv <- as.numeric(validationdata[s, 5:16])    # one-hot labels
        onehot <- rep(0, 12)
        onehot[which.max(yv)] <- 1
        correctclass[s, 1] <- sum(xv * onehot)
      }
      # NOTE: `estimates` is deliberately bound a second time here to keep
      # the column layout identical to the original script.
      validationdata <- cbind(validationdata, estimates, correctclass)
      new_acc <- c(new_acc, sum(correctclass$correct))
      new_weight1_log <- cbind(new_weight1_log, new_w1)
      new_weight2_log <- cbind(new_weight2_log, new_w2)
      plot(new_acc / val_len, type = "l", col = "blue", xlab = "1000 Steps", ylab = "Accuracy")
    }
  }
}
#########################################################################################################################
# Plotting the shifted weights
#########################################################################################################################
library(scales)
# One rainbow colour per weight; each row of new_weight1_log is one
# weight's trajectory across the fine-tuning checkpoints.
col_pal = rainbow(n=nrow(new_weight1_log))
plot(c(new_weight1_log), ylim=range(new_weight1_log), type='n', xlim=c(1,ncol(new_weight1_log)), main = "Weight Log Layer 1", xlab = "1000 Steps", ylab = "Weights") # type 'n' plots nothing
for(i in 1:length(new_w1)){
points(new_weight1_log[i,], type='l', pch=19, col=alpha(col_pal[i], 0.5)) # alpha f'n sets colour opacity
}
col_pal = rainbow(n=nrow(new_weight2_log))
plot(c(new_weight2_log), ylim=range(new_weight2_log), type='n', xlim=c(1,ncol(new_weight2_log)), main = "Weight Log Layer 2", xlab = "1000 Steps", ylab = "Weights") # type 'n' plots nothing
for(i in 1:length(new_w2)){
points(new_weight2_log[i,], type='l', pch=19, col=alpha(col_pal[i], 0.5)) # alpha f'n sets colour opacity
}
#######################################################################################################################
# writing tables of accuracy
#######################################################################################################################
acc6590 = acc
write.table(acc6590, "acc6590.txt", sep="\t")
#######################################################################################################################
# Plotting Full NN results
#######################################################################################################################
# Reference accuracy/MSE curves of the full neural network (501
# checkpoints, accuracy out of 6000 validation rows), for comparison.
NN_ACC = t(read.table("NN_ACC.csv",sep = ","))
NN_MSE = t(read.table("NN_MSE.csv",sep = ","))
plot(1:501,NN_ACC/6000, type="l", col = "red", xlab="1000 Steps", ylab = "Accuracy")
plot(1:501,NN_MSE, type="l", col = "red", xlab="1000 Steps", ylab = "MSE")
|
2c3549ad53980af65f7fa671e7af03738710dfff | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/VBTree/examples/dl2vbt.Rd.R | 6f20c4806d22c19ad93fed09110a7a19728a3d2e | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 607 | r | dl2vbt.Rd.R | library(VBTree)
### Name: dl2vbt
### Title: Convert a double list to vector binary tree
### Aliases: dl2vbt
### Keywords: Double.List Vector.Binary.Tree
### ** Examples
#Structurize the column names of datatest:
# chrvec2dl() and dl2vbt() come from the VBTree package loaded above;
# `datatest` is presumably an example data set from that package -- confirm.
colname <- colnames(datatest)
colnamedl <- chrvec2dl(colname, "-")
colnamevbt <- dl2vbt(colnamedl)
#Simple data cleaning for sub-constructure existing double list;
#Make unregulated double list:
# Mixed types, an empty element c(), and hyphenated strings exercise the
# regularisation performed by dl2vbt().
unregdl <- list(c("7", 2, 10), c("chr", "5"), c(),
c("var2", "var1", "var3"), c("M-8-9", "3-2"), c("6-3", "2-7"))
regvbt <- dl2vbt(unregdl)
regvbt2 <- dl2vbt(unregdl, FALSE) # not recommended
|
ca5c03eaad2b75382521485c6fbacc5315eb553c | 272321a59a4ff63c923592930fb9c08573afcfde | /PCA/PCA.R | e9985735dc70858b9c28fb3f6ad4b3ad8d647454 | [
"MIT"
] | permissive | abhi-3453/R-notebook | 9ddc73c55d6753a4f9544bc2bd7ca23d211af9f4 | f15ec5f8114d5043709e39ec8e90dc444317a88e | refs/heads/master | 2020-03-25T23:55:19.017269 | 2018-08-10T14:53:59 | 2018-08-10T14:53:59 | 144,299,027 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,024 | r | PCA.R |
# Kernel PCA + logistic regression on the Social_Network_Ads data.
# (Despite the file name, kpca() from kernlab performs *kernel* PCA.)
df <- read.csv('Social_Network_Ads.csv')
head(df)
df <- df[-1:-2]  # drop the first two columns, keep Age/EstimatedSalary/Purchased
library(ggplot2)
pl1 <- ggplot(df, aes(y = EstimatedSalary, x = Age)) +
  geom_point(aes(color = factor(Purchased)))
library(caTools)
set.seed(101)
# 70/30 split stratified on the outcome.  Renamed from `sample`, which
# shadowed base::sample; TRUE/FALSE replace the unsafe T/F aliases.
split_mask <- sample.split(df$Purchased, SplitRatio = 0.7)
train <- subset(df, split_mask == TRUE)
test <- subset(df, split_mask == FALSE)
# Standardise the two predictors (column 3 is the outcome).
train[-3] <- scale(train[-3])
test[-3] <- scale(test[-3])
library(kernlab)
# Two RBF-kernel principal components as the new feature space.
gpca <- kpca(~. ,data = train[-3], kernel = 'rbfdot', features = 2)
train_pca <- as.data.frame(predict(gpca, train))
train_pca$Purchased <- train$Purchased
test_pca <- as.data.frame(predict(gpca, test))
test_pca$Purchased <- test$Purchased
df1 <- rbind(train_pca, test_pca)
pl <- ggplot(df1, aes(y = V2, x = V1)) +
  geom_point(aes(color = factor(Purchased)))
# Logistic regression in the kernel-PC space, thresholded at 0.5.
model <- glm(Purchased ~ ., data = train_pca, family = binomial('logit'))
pred <- predict(model, newdata = test_pca[-3], type = 'response')
xpred <- as.data.frame(pred)
ypred <- ifelse(pred > 0.5, 1, 0)
yypred <- as.data.frame(ypred)
head(ypred)
# Confusion matrix: actual outcome vs thresholded prediction.
cm <- table(test_pca[, 3], ypred)
cm
|
cd90455d9e9dd86d1a8c1867d614438ea275acc4 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/cpr/examples/get_spline.Rd.R | 0323e65d19419b65f47ee196544cb29905254065 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 520 | r | get_spline.Rd.R | library(cpr)
### Name: get_spline
### Title: Get the Control Polygon and the Spline Function
### Aliases: get_spline
### ** Examples
data(spdg, package = "cpr")
## Extract the control polygon and spline for plotting. We'll use base R
## graphics for this example.
# cp() and bsplines() come from the cpr package loaded above; a 10-df
# B-spline basis is fit to pdg as a function of day.
a_cp <- cp(pdg ~ bsplines(day, df = 10), data = spdg)
cp_and_spline <- get_spline(a_cp)
plot(cp_and_spline$cp, type = "b")        # control polygon as points+lines
points(cp_and_spline$spline, type = "l")  # overlay the fitted spline
grid()
# compare to the cpr:::plot.cpr_cp method
plot(a_cp, show_spline = TRUE)
|
f823f475afabdb984fe925101c3592b778f33260 | 1fa426920d0cde02234ffb6d94bbac3453b94c38 | /001-R-Studio Vector.R | 2eec62a200badda033c3a2543555dbd163071b70 | [] | no_license | MyHackInfo/R-Language | 6a7f87e597a1fff1a23b3b8e5458c8f0b4760bc6 | a0d8bb449e3735bfb259e8ac010262d96a91af45 | refs/heads/master | 2020-04-03T19:36:23.714704 | 2018-10-31T09:11:31 | 2018-10-31T09:11:31 | 155,528,648 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 392 | r | 001-R-Studio Vector.R | # Vector
# A vector is R's basic one-dimensional container (similar to an array
# in C/C++); it is built with the c() function.
MyVector <- c(2,3,4,5,6)
# Mixing numbers and strings coerces every element to character.
MyVector2 <- c(2,3,5,6,"a","b","g3",'4')
MyVector2
# Functions to check the type of a vector's elements:
is.numeric(MyVector)
is.integer(MyVector)
is.double(MyVector)
# seq() builds sequences (like ':'); rep() replicates values.
# (The original called seq() and rep() with no arguments at top level,
# which is meaningless as a demonstration -- rep() with no arguments
# fails -- so the bare calls are now comments.)
# seq() # Sequence like ':'
# rep() # replicate
tab2 <- seq(1,20,2)
coun1 <- 1:20   # same value as the original seq(1:20), written directly
tab2
coun1
sameMore <- rep('narsi',5)
sameMore
|
d22969f060a93c6c1d32b4a64c6fdf683f55abc5 | 9c173a9342104e6779fea737e4ebe3b5e066098a | /9 - Developing Data Products/Twitch/test/server.R | db1b440f31d704dded5960767a773900368c3652 | [] | no_license | xush65/Data-Scientist-MOOC | 639fa7ad291ae4607eb361099d74f7a36bd78322 | f91e8f71ce63d82242edfd4c36520211261a01d3 | refs/heads/master | 2021-05-30T09:42:05.456437 | 2014-12-17T10:41:49 | 2014-12-17T10:41:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 633 | r | server.R | library(shiny)
# NOTE(review): despite the file path (server.R), this defines the *UI*
# via shinyUI()/pageWithSidebar().
shinyUI(
pageWithSidebar(
# application title
headerPanel("Test"),
sidebarPanel(
# numericInput(inputId, label, value, ...): id, printed label, default
# value, allowed range and step size
numericInput('num', 'Number', 90, min = 50, max = 200, step = 5),
# BUG FIX: the original left a trailing comma after submitButton(),
# which creates an empty argument and makes R raise
# "argument ... is empty" when sidebarPanel() is evaluated.
submitButton('Submit')
# sliderInput('mu', 'Guess at the mean', value = 70, min = 62, max = 74, step = 0.05)
),
mainPanel(
h3('Results'),
h4('You entered'),
verbatimTextOutput("inputValue"),
h4('Which resulted in a prediction of'),
verbatimTextOutput("prediction"),
plotOutput('newHist')
)
)
)
cbe157b2ed7365b3684c8619e8d820b61cbd590c | d801747059693596b2f5ce7ab7883791bbf02560 | /data_COVID.R | 81f46ea8fafb86ca07069fee927265818d87dc8e | [] | no_license | b-qiu/COVID-19 | af5055054928481d7d75c0003655ecd4f590a836 | 084d5c97acef75bef9b9d994e670335aa6336517 | refs/heads/master | 2022-11-16T10:02:50.951586 | 2020-07-14T11:56:21 | 2020-07-14T11:56:21 | 256,879,007 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,035 | r | data_COVID.R | library("pacman")
# Pull Australian daily COVID-19 case counts from three sources
# (Johns Hopkins, Our World in Data, Worldometer) and push each series
# to its own tab of a Google Sheet.
p_load("tidyverse", "rvest", "qdapRegex",
       "RCurl", "magrittr", "googlesheets4")

options(gargle_oauth_cache = ".secrets")
# NOTE(review): XXXX is a redacted placeholder -- this call errors unless
# a valid email string/object is supplied here.
gs4_auth(email = XXXX,
         cache = ".secrets")

# Johns Hopkins publishes cumulative counts per province; aggregate to a
# national daily series and difference to get new cases per day.
jh <- read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv") %>%
  select(-`Province/State`, -`Long`, -`Lat`) %>%
  filter(`Country/Region` == "Australia") %>%
  gather(dates, cases, -`Country/Region`) %>%
  group_by(dates) %>%
  summarize(cases = sum(cases)) %>%
  mutate(dates = as.Date(dates, "%m/%d/%y")) %>%
  arrange(dates) %>%
  mutate(cases = c(0, diff(cases)))

# Our World in Data already reports new cases; shift dates back one day
# and align the column names with the other sources.
ow <- read_csv("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/ecdc/new_cases.csv") %>%
  select(date, Australia) %>%
  mutate(date = date - 1) %>%
  rename(dates = date, cases = Australia)

# Worldometer embeds its chart data in inline JavaScript; take the 11th
# <script> tag and extract the bracketed arrays.
# (T/F replaced by TRUE/FALSE throughout -- T and F are reassignable.)
wo <- read_html("https://www.worldometers.info/coronavirus/country/australia/") %>%
  html_node("body") %>%
  xml_find_all("//script[contains(@type, 'text/javascript')]") %>%
  html_text() %>%
  extract2(11) %>%
  rm_between("[", "]", extract = TRUE)

xlab <- wo[[1]][1] %>%      # first array: quoted "%b %d" date labels
  str_split(",") %>%
  unlist(use.names = FALSE) %>%
  str_sub(2, 7) %>%         # drop the surrounding quote characters
  as.Date("%b %d")
ylab <- wo[[1]][2] %>%      # second array: daily counts, "null" -> 0
  str_split(",") %>%
  unlist(use.names = FALSE) %>%
  str_replace("null", "0") %>%
  as.numeric()
wo <- data.frame(dates = xlab, cases = ylab)

# setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# jh_gs_id <- gs4_get(jh_gs) %>% pluck("spreadsheet_id")
# ow_gs_id <- gs4_get(ow_gs) %>% pluck("spreadsheet_id")
write_sheet(jh, ss = "1OfdJUC-1nB8cqQ0HsrCpNFC8yPDjDPUUkQFxZA6EdYY", sheet = "jh")
write_sheet(ow, ss = "1OfdJUC-1nB8cqQ0HsrCpNFC8yPDjDPUUkQFxZA6EdYY", sheet = "ow")
write_sheet(wo, ss = "1OfdJUC-1nB8cqQ0HsrCpNFC8yPDjDPUUkQFxZA6EdYY", sheet = "wo")
# saveRDS(wo, file = "worldometer_data.RDS")
# saveRDS(ow, file = "our_world_data.RDS")
# saveRDS(jh, file = "john_hopkins_data.RDS")
f9d0217f4ced210fba2d02c63a2eeb1d97680a8a | 4c58dd631dd589459bbeb03b0359bc75f47820e2 | /man/Woolf.CI.Rd | 090a9c27fee71bf26ad1830fd79bd46ad2f1dea6 | [] | no_license | cran/ORCI | 10a74c9a19c6d88c75f4271cb757b3ff0af87a0d | 7a946dbc53e4fab22949bed1ecef1b3af41de6fb | refs/heads/master | 2021-01-10T22:00:54.744874 | 2014-06-09T00:00:00 | 2014-06-09T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 858 | rd | Woolf.CI.Rd | \name{Woolf.CI}
\alias{Woolf.CI}
\title{
Compute Woolf confidence interval for the odds ratio
}
\description{
Compute Woolf confidence interval for the odds ratio of two independent binomial proportions.
}
\usage{
Woolf.CI(x1, n1, x2, n2, conf = 0.95)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x1}{
number of events in group 1.
}
\item{n1}{
number of trials in group 1.
}
\item{x2}{
number of events in group 2.
}
\item{n2}{
number of trials in group 2.
}
\item{conf}{
confidence level.
}
}
\references{
Woolf, B. (1955). On estimating the relation between blood group and disease. Annals of Human Genetics, 19(4):251-253.
}
\author{
Libo Sun
}
\examples{
# x1=2, n1=14, x2=1, n2=11
# 95% confidence interval for the odds ratio
Woolf.CI(2, 14, 1, 11)
}
\keyword{Woolf}
|
61e206fbfb4806c46706406cd0315b3d206d2299 | 9d2bf430cee06e57524439ed751e5fa0c34cc362 | /R_Classifier/youtubeClassifier.R | d3c9bed4c3a7ca25a2d3c68230113770def15a07 | [
"Apache-2.0"
] | permissive | ybotkiller/ybotkiller | 9eb12607223a5c0b33cc3a4a74f190ffac7c62e6 | 2141805b4a107d232df56c713647b65204a9b9b7 | refs/heads/master | 2021-01-16T18:38:14.884783 | 2017-08-14T19:16:17 | 2017-08-14T19:16:17 | 100,103,277 | 25 | 2 | null | 2017-08-13T15:06:46 | 2017-08-12T09:44:35 | Jupyter Notebook | UTF-8 | R | false | false | 6,142 | r | youtubeClassifier.R | # rm(list = ls())
# Plotting helpers (makeWordCloud, makeWordsHist) used further below.
source("visualization.R")
# df <- read.csv(file = "data/comments.csv", encoding = "UTF-8")
# save(df, file = "df.RData")
# library("xlsx")
# df <- read.xlsx(file = "data/classified2000_2.xlsx", sheetIndex = 1, header = TRUE, encoding = "UTF-8")
# save(df, file = "df.RData")
# Cached copy of the manually labelled YouTube comments; `class` is the
# bot/not-bot label, columns 1:9 are the comment fields kept for training.
load("df.RData")
df_1 <- df[!is.na(df$class),]
df_1 <- df_1[,1:9]
#
# df_2_Ok <- df[df$replies.class == 0, ]
# df_2_Ok <- df_2_Ok[!is.na(df_2_Ok$replies.class),]
# df_2_Bot <- df[df$replies.class == 1, ]
# Print a confusion matrix plus headline metrics for a classifier, and
# draw precision-recall and ROC curves.
#
# df_test_pred   - predicted class labels (coercible to numeric)
# df_test_labels - true class labels, same length
#
# Returns (invisibly) a list with the mistake count, the accuracy and the
# AUC.  NOTE(review): some call sites in this project pass the arguments
# as (labels, predictions); the "predicted"/"actual" table headers are
# then swapped -- worth confirming against each caller.
evaluateResults <- function(df_test_pred, df_test_labels) {
  library("gmodels")  # CrossTable
  library(ROCR)       # prediction()/performance()
  # Cross-tabulation of predicted vs actual class counts.
  CrossTable(df_test_pred, df_test_labels,
             prop.chisq = FALSE, prop.t = FALSE, prop.r = FALSE,
             dnn = c('predicted', 'actual'))
  tbl <- table(df_test_pred, df_test_labels)
  mistakes <- sum(tbl) - sum(diag(tbl))       # off-diagonal count
  effectiveness <- sum(diag(tbl)) / sum(tbl)  # overall accuracy
  # BUG FIX: the original wrapped these in print(str(...)); str() returns
  # NULL invisibly, so each call also printed a spurious "NULL".
  print(mistakes)
  print(effectiveness)
  # Precision-recall and ROC curves plus the area under the ROC curve.
  pred <- prediction(as.numeric(df_test_pred), as.numeric(df_test_labels))
  plot(performance(pred, "prec", "rec"))
  plot(performance(pred, "tpr", "fpr"))
  auc <- as.numeric(performance(pred, "auc")@y.values)
  invisible(list(mistakes = mistakes, accuracy = effectiveness, auc = auc))
}
# set.seed(1)
# data <- data[sample(nrow(data)),]
# ---- Build and clean a text corpus from the comment bodies ----
library("tm")
library("stringr")
library("SnowballC")
# df$commentText <- sapply(df$commentText,function(row) iconv(row, "utf-8", "utf-8", sub=""))
# usableText <- iconv(df$commentText, 'UTF-8', 'UTF-8')
# df <- df_2_Bot
df <- df_1
# Replace non-printable characters that break tm's tokenizer.
usableText = str_replace_all(df$commentText,"[^[:graph:]]", " ")
corpus <- VCorpus(VectorSource(usableText))
corpus_clean <- tm_map(corpus, content_transformer(tolower))
# BUG FIX: the original second step read from `corpus` instead of
# `corpus_clean`, silently discarding the lower-casing above.
corpus_clean <- tm_map(corpus_clean, removeNumbers)
corpus_clean <- tm_map(corpus_clean, removeWords, stopwords("russian"))
corpus_clean <- tm_map(corpus_clean, removePunctuation)
corpus_clean <- tm_map(corpus_clean, stemDocument, "russian")
corpus_clean <- tm_map(corpus_clean, stripWhitespace)
# Helper to collapse punctuation runs into single spaces.
# NOTE(review): defined but never used in the visible script.
replacePunctuation <- function(x) {
gsub("[[:punct:]]+", " ", x)
}
# ---- Document-term matrix ----
dtm <- DocumentTermMatrix(corpus_clean, control = list(removePunctuation = TRUE,
stopwords = TRUE,
tolower=FALSE))
sparse <- 0.999
dtm2 <- removeSparseTerms(dtm, sparse)  # drop extremely sparse terms
data_dtm <- as.data.frame(as.matrix(dtm2))
data_dtm <- data_dtm[, !duplicated(colnames(data_dtm))]
# Term counts are treated as categorical features downstream.
for (i in 1:ncol(data_dtm)) {
data_dtm[,i] <- as.factor(as.character(data_dtm[,i]))
}
# data_dtm <- data[ , (names(data) %in% c("блять", "молодцы", "тупая"))]
# listBad <- c("")
# listSwearing <- c("")
data <- cbind(df, data_dtm)
data$class <- as.factor(as.character(data$class))
# Visualize data
# makeWordCloud(corpus_clean)
# makeWordsHist(data_dtm)
# # Make terms table
#
# df_terms <- data.frame(term = character(ncol(data_dtm)),
# ok = integer(ncol(data_dtm)),
# bot = integer(ncol(data_dtm)), stringsAsFactors = FALSE)
#
# df_1_Ok <- data[data$class == 0, ]
# df_1_Ok <- df_1_Ok[!is.na(df_1_Ok$class),]
# df_1_Bot <- data[data$class == 1, ]
# df_1_Bot <- df_1_Bot[!is.na(df_1_Bot$class),]
#
# for (i in 1:ncol(data_dtm)) {
# df_terms$term[i] = as.character(unlist(colnames(data_dtm[i])))
# df_terms$ok[i] = sum(as.numeric(as.character(df_1_Ok[,9+i])))
# df_terms$bot[i] = sum(as.numeric(as.character(df_1_Bot[,9+i])))
# }
#
# library(xlsx)
# write.xlsx2(df_terms, file = "data/terms.xlsx", sheetName="data", col.names=TRUE, row.names=FALSE, append=FALSE)
# # Make df of equal size of classes
# data <- rbind(df_1_Ok[1:nrow(df_1_Bot),], df_1_Bot)
set.seed(2)
sampleForData <- sample(nrow(data), round(nrow(data) * 0.8))
data_train1 <- data[sampleForData,]
data_test1 <- data[-sampleForData,]
data_train = data_train1[ , !(names(data_train1) %in% c("id", "user", "date", "timestamp", "commentText"))]
data_test = data_test1[ , !(names(data_test1) %in% c("id", "user", "date", "timestamp", "commentText"))]
data_train_labels <- factor(data_train$class)
data_test_labels <- factor(data_test$class)
start.time <- Sys.time()
# library("class")
# data_test_pred <- knn(train = data_train, test = data_test,
# cl = data_train_labels, k = 10)
# evaluateResults(data_test_labels, data_test_pred)
#
# library("C50")
# tree_model <- C5.0(data_train, data_train_labels, trials = 100)
# data_test_pred <- predict(tree_model, data_test, type = "class")
# evaluateResults(data_test_labels, data_test_pred)
# library("kernlab")
# data_train <- data_train[, !duplicated(colnames(data_train))]
# # svm_classifier <- ksvm(class ~ ., data = data_train, kernel = "polydot", C = 1, sigma = 1)
# svm_classifier <- ksvm(class ~ ., data = data_train, kernel = "polydot")
# data_test_pred <- predict(svm_classifier, data_test)
# evaluateResults(data_test_labels, data_test_pred)
# Train a naive Bayes classifier on the document-term features with
# 10-fold cross-validation (1 repeat) and evaluate on the held-out set.
# library() instead of require(): fail loudly if caret is missing.
library(caret)
fitControl <- trainControl(method = "repeatedcv", number = 10, repeats = 1)
# fitControl <- trainControl(method = "none")
model <- train(class ~ ., data = data_train,
# tuneLength = 1,
method = "naive_bayes", na.action = na.omit,
trControl = fitControl)
df_test_pred <- predict(model, data_test)
evaluateResults(df_test_pred, data_test_labels)
# for (i in 1:length(df_test_pred)) {
# if (df_test_pred[i] == 1) {
# print(i)
# }
# }
#
# end.time <- Sys.time()
# time.taken <- end.time - start.time
# library("xlsx")
# write.xlsx2(df, file = "data/classified.xlsx", sheetName="data", col.names=TRUE, row.names=FALSE, append=FALSE)
|
189fc011cab4baab0ef091073e87ada767021c30 | 4ee78d872fec51bc5e4da565f639d5fcb5981042 | /server.R | cd4b0f2f5f1239b3a2a312cb86825c540b9c9439 | [] | no_license | fsakaori/bayes1_normal_approx | 8d0c8f567f38eed9456e9cd323e7cd225c9260a3 | 2029e6cb776bf81308e16b07ec11ac97f5decf2d | refs/heads/master | 2023-01-23T23:52:57.386761 | 2020-11-27T08:44:06 | 2020-11-27T08:44:06 | 316,445,550 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,258 | r | server.R | library(shiny)
# server.R — draws the joint posterior of (mu, log sigma) for normally
# distributed data and compares it with its large-sample normal approximation.
# input$size: sample size, input$mu: true mean, input$sigma: true variance,
# input$seed: RNG seed.
shinyServer(function(input, output) {
  # Simulate the data once per (seed, size, mu, sigma) combination; the
  # reactive caches it for all outputs below.
  dataInput <- reactive({
    set.seed(input$seed)
    rnorm(input$size, mean=input$mu, sd=sqrt(input$sigma)) # keep the simulated data cached inside shiny
  })
  # Preview table: first and last few simulated values side by side.
  output$Data <- renderTable({
    head_data <- head(dataInput()) ; tail_data <- tail(dataInput())
    dat_1 <- data.frame(head_data, tail_data) # the DT package would be more interactive, but we avoid requiring users to install it
  })
  # Contour plot of the exact posterior of (mu, log sigma).
  output$graph_1<-renderPlot({
    mean_x <- mean(dataInput()) ; sig_x <- var(dataInput())
    func <- function(x,y){ # relies on mean_x, sig_x and input$size from the enclosing scope
      ey <- exp(y)
      ey^(-input$size)*exp(-1/(2*ey^2)*((input$size-1)*sig_x + input$size*(mean_x-x)^2)) } # the posterior density we want to approximate
    x <- seq(-3,3,length.out=1000) # x grid
    y <- seq(-3,3,length.out=1000) # y grid
    z <- outer(x,y,func) # density evaluated on the grid
    contour(x, y, z,drawlabels=F,
            xlim=c(-input$xlim, input$xlim), ylim=c(-input$ylim, input$ylim), xlab="μ", ylab="logσ", col=2, main="Posterior distribution") # drawlabels = F suppresses contour level labels for a cleaner plot
  })
  # Contour plot of the normal approximation to that posterior.
  output$graph_2 <- renderPlot({
    mean_x <- mean(dataInput()) ; sig_x <- var(dataInput())
    mu_1 <- mean_x ; sig_1 <- ((input$size-1)/input$size)*sig_x # convert sample variance to the divide-by-n variance
    mu_2 <- log(sqrt(sig_1)) ; sig_2 <- 1/(2*input$size)
    func <- function(x,y) {
      nx <- (x-mu_1)/(sig_1/sqrt(input$size)) ; ny <- (y-mu_2)/sqrt((sig_2)) # standardize; NOTE(review): should the first scale be sqrt(sig_1/input$size)? — confirm
      1/(2*pi*sqrt(sig_1*sig_2))*exp(-nx^2-ny^2) } # uncorrelated bivariate normal; dropping the 2*pi factor would give the same contours
    x <- seq(-3,3,length.out=1000) # x grid
    y <- seq(-3,3,length.out=1000) # y grid
    z <- outer(x,y,func) # density evaluated on the grid
    contour(x, y, z,drawlabels=F,
            xlim=c(-input$xlim, input$xlim), ylim=c(-input$ylim, input$ylim),xlab="μ", ylab="logσ", col=4, main="Normal distribution") # drawlabels = F suppresses contour level labels
  })
  # Overlay of graph_1 and graph_2 (via par(new=T)) with a legend.
  output$graph_3 <- renderPlot({
    mean_x <- mean(dataInput()) ; sig_x <- var(dataInput())
    ## copied from graph_1 (start)
    func <- function(x,y){ # relies on mean_x, sig_x and input$size from the enclosing scope
      ey <- exp(y)
      ey^(-input$size)*exp(-1/(2*ey^2)*((input$size-1)*sig_x + input$size*(mean_x-x)^2)) } # the posterior density we want to approximate
    x <- seq(-3,3,length.out=1000) # x grid
    y <- seq(-3,3,length.out=1000) # y grid
    z <- outer(x,y,func) # density evaluated on the grid
    contour(x, y, z,drawlabels=F,
            xlim=c(-input$xlim, input$xlim), ylim=c(-input$ylim, input$ylim), col=2) # drawlabels = F suppresses contour level labels
    ## copied from graph_1 (end)
    par(new=T)
    ## copied from graph_2 (start)
    mu_1 <- mean_x ; sig_1 <- ((input$size-1)/input$size)*sig_x # convert sample variance to the divide-by-n variance
    mu_2 <- log(sqrt(sig_1)) ; sig_2 <- 1/(2*input$size)
    func <- function(x,y) {
      nx <- (x-mu_1)/(sig_1/sqrt(input$size)) ; ny <- (y-mu_2)/sqrt((sig_2)) # standardize
      1/(2*pi*sqrt(sig_1*sig_2))*exp(-nx^2-ny^2) } # uncorrelated bivariate normal; dropping the 2*pi factor would give the same contours
    x <- seq(-3,3,length.out=1000) # x grid
    y <- seq(-3,3,length.out=1000) # y grid
    z <- outer(x,y,func) # density evaluated on the grid
    contour(x, y, z,drawlabels=F,
            xlim=c(-input$xlim, input$xlim), ylim=c(-input$ylim, input$ylim), xlab="μ", ylab="logσ", col=4, main="Posterior ~ Normal") # drawlabels = F suppresses contour level labels
    ## copied from graph_2 (end)
    ## finally, add the legend
    legend("topright", legend = c("Posterior", "Normal"), col = c(2,4), lty=1, # legend and col accept vectors built with c()
           box.lwd = 2, # legend box line width
           box.lty = 1, # legend box line type (solid / dashed)
           box.col = "darkgreen", # legend box color
           text.col = c(2,4), # legend text colors
           text.font = 2, # 1 = plain, 2 = bold, 3 = italic
           text.width = 0.115, # text width
           bg = "white") # legend background color
    ## Note: this shows how closely the normal approximation tracks the
    ## posterior, but extreme slider settings can push the contours outside
    ## the visible window; the mean/variance sliders animate the shift.
  })
})
ac0415b6946857e57bbce8c0f30b2e4c2dc8515c | 6ee0550428b87c9565addae16f40d6ed413df3b4 | /R/parallelsetting.r | 78dd79eb4e25d962f72798048184b06ba461ddb3 | [] | no_license | maoguihu/atakrig | cba2d9c8160c77af8889c2af0129e99d2c1684b4 | c5f9a9445907670602914b2eeed504b54cbeec3b | refs/heads/master | 2021-06-11T17:29:58.286872 | 2021-06-02T10:31:13 | 2021-06-02T10:31:13 | 191,763,675 | 4 | 4 | null | 2021-04-26T13:29:28 | 2019-06-13T12:59:10 | R | UTF-8 | R | false | false | 716 | r | parallelsetting.r | ## atakriging
## Author: Maogui Hu.
# Start (or restart) the snow cluster used by atakrig and register it with
# doSNOW so foreach loops can use it.
#
# spec: cluster specification passed to snow::makeCluster(); defaults to
#       min(available cores, 8).
# ...:  further arguments forwarded to snow::makeCluster().
# Returns the new cluster object (also stored in option "ataKrigCluster").
ataStartCluster <- function(spec = min(parallel::detectCores(), 8), ...) {
  existing <- getOption("ataKrigCluster")
  # Tear down any previously registered cluster before creating a new one.
  if (!is.null(existing)) {
    try(snow::stopCluster(existing), silent = TRUE)
  }
  cluster <- snow::makeCluster(spec = spec, ...)
  doSNOW::registerDoSNOW(cluster)
  options(ataKrigCluster = cluster)
  cluster
}
# Stop the cluster registered in option "ataKrigCluster" (if any) and clear
# the option so ataIsClusterEnabled() reports FALSE afterwards.
#
# Fix: stopCluster/try are now namespace-qualified (snow::stopCluster) for
# consistency with ataStartCluster(); the bare call failed when the snow
# package was not attached to the search path.
ataStopCluster <- function() {
  cl <- getOption("ataKrigCluster")
  if (!is.null(cl)) try(snow::stopCluster(cl), silent = TRUE)
  options(ataKrigCluster = NULL)
}
# Remove all objects from the global environment of every cluster worker.
#
# Fix: the original passed the *string* "rm(list=ls())" to clusterEvalQ();
# clusterEvalQ quotes its expression argument, so the string was simply
# evaluated to itself on each worker and nothing was ever removed. The call
# must receive the unquoted expression. Also namespace-qualified for
# consistency with the other cluster helpers.
ataClusterClearObj <- function() {
  cl <- getOption("ataKrigCluster")
  if (!is.null(cl)) try(snow::clusterEvalQ(cl, rm(list = ls())), silent = TRUE)
}
# Report whether a cluster is currently registered for atakrig.
# Returns TRUE when option "ataKrigCluster" holds a cluster, FALSE otherwise.
ataIsClusterEnabled <- function() {
  cl <- getOption("ataKrigCluster")
  !is.null(cl)
}
|
55e5a3fd20c152d51931c40d79de4ee477e5cb4e | 72002cbc729a343b747e20c1f5df72562e5c4dc8 | /man/impute.me.Rd | 6312035aa3c7b11c98fc61eed66d4ad0912eec4d | [] | no_license | fenrir849/fifer | efa973d96819f367916447bed8a2cb484d588d98 | 2f52785312135ddd441f9cedb0e4ab411549684e | refs/heads/master | 2023-01-02T02:53:52.724741 | 2020-10-29T16:53:16 | 2020-10-29T16:53:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,911 | rd | impute.me.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/impute.me.R
\name{impute.me}
\alias{impute.me}
\title{Multiple Imputation on a Model}
\usage{
impute.me(
model,
data,
predictors = NULL,
keep = T,
imputations = 20,
silent = F,
return.mod = F
)
}
\arguments{
\item{model}{An R-friendly model. Currently, it only allows lm objects, but will eventually allow other objects (e.g., glm).}
\item{data}{The dataset used for analysis. This dataset should contain predictors used to impute the missing values}
\item{predictors}{A list of predictors (as a character vector) that identify which variables to keep (or drop; see below argument).}
\item{keep}{Logical. Should the list of predictors be kept or dropped? Defaults to keep.}
\item{imputations}{The number of imputations to be performed. Defaults to 20.}
}
\value{
The multiply imputed parameter estimates obtained by re-fitting the model on
each imputed dataset.
}
\description{
Multiple Imputation on a Model
}
\details{
This is a wrapper function for both the mice function in the mice package, as well as for basic models in R (e.g., lm). As input,
it takes the model the user wishes to estimate using advanced missing data strategies, as well as a list of variables they wish to use
to impute the missing values. The function takes the raw data and performs MI using mice, then re-analyzes the dataset and outputs the
multiply imputed parameter estimates.
}
\examples{
data(exercise_data)
d = exercise_data
##### create missing data in motivation
missing.ld = which(d$motivation<quantile(d$motivation, .25))
notmissing = which(!(1:nrow(d) \%in\% missing.ld))
d$weight.loss.missing = d$weight.loss
d$weight.loss.missing[missing.ld] = NA
#### create model with missing data
model = lm(weight.loss.missing~motivation, data=d)
predictors = c("muscle.gain.missing", "weight.loss")
impute.me(model, data=d, predictors=predictors, keep=F, imputations=5)
}
\author{
Dustin Fife
}
|
8d8338c512cf1568e8fa17159a132e27bed1088e | 830b4deaa3ab156827cf284cf699a550888c4b1c | /plot1.R | 4ac231ffea47c70a51462ccfc467ec5367cb48c4 | [] | no_license | DataAnalystUser/ExData_Plotting1 | 0f6de6c610101bfb0351af51677614de72be6a9c | 3ae94e62db19cc4dc3da9450f842c50e9c21da63 | refs/heads/master | 2021-01-17T06:59:39.061917 | 2015-07-09T16:51:16 | 2015-07-09T16:51:16 | 38,705,161 | 0 | 0 | null | 2015-07-07T17:57:39 | 2015-07-07T17:57:39 | null | UTF-8 | R | false | false | 499 | r | plot1.R | PowerData = read.table("household_power_consumption.txt", header = TRUE, sep = ';', na.strings = "?",stringsAsFactors = FALSE)
# plot1: histogram of household global active power for 1-2 Feb 2007.
PowerData$Date <- as.Date(PowerData$Date, format="%d/%m/%Y")
# Keep only the two days of interest.
ImportantDates <- PowerData[(PowerData$Date=="2007-02-01") | (PowerData$Date=="2007-02-02"),]
hist(ImportantDates$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency", col = "Red")
# Copy the on-screen plot to a 480x480 PNG and close the PNG device.
dev.copy(png, file = "plot1.png", height = 480, width = 480)
dev.off()
|
4a28d6cf0e550e6d409e81e6fb0a64d60357465b | adcc74d227f9864430b26035be9befa9a2d110c8 | /man/matred.Rd | ae3ae07ae8367daeefc940913bd660ab9f5c4b05 | [] | no_license | PolyOmica/funtooNorm | 4ca8b4512d2853a5e2e7e714a8710d4b7b4df61c | d187a497679f8634c328e443c7509de7616e7216 | refs/heads/master | 2021-01-15T14:29:56.068196 | 2016-01-10T17:12:49 | 2016-01-10T17:12:49 | 45,708,881 | 0 | 1 | null | 2015-11-06T21:30:20 | 2015-11-06T21:30:20 | null | UTF-8 | R | false | false | 426 | rd | matred.Rd | \name{matred}
\alias{matred}
\docType{data}
\title{
Red control signals
}
\description{
Matrix containing the control probe signals from the red channel. The column names should be the same as in the signal A and signal B matrices.
}
\usage{matred}
\format{
A matrix with 835 rows (one for each control probe) and 93 columns
(one per sample). Each element is the control probe signal for the
given probe and sample.
}
|
8194a3f87af737efa14f971e30c1845b2e2c3be4 | 002929791137054e4f3557cd1411a65ef7cad74b | /tests/testthat/test_calcFE.R | 85a760dfa618694bc174c993333cb5b1c39ae818 | [
"MIT"
] | permissive | jhagberg/nprcgenekeepr | 42b453e3d7b25607b5f39fe70cd2f47bda1e4b82 | 41a57f65f7084eccd8f73be75da431f094688c7b | refs/heads/master | 2023-03-04T07:57:40.896714 | 2023-02-27T09:43:07 | 2023-02-27T09:43:07 | 301,739,629 | 0 | 0 | NOASSERTION | 2023-02-27T09:43:08 | 2020-10-06T13:40:28 | null | UTF-8 | R | false | false | 1,490 | r | test_calcFE.R | #' Copyright(c) 2017-2020 R. Mark Sharp
#' This file is part of nprcgenekeepr
# Unit tests for calcFE(): founder-equivalent calculation on a small
# three-generation example pedigree (Lacy 1989).
context("calcFE")
library(testthat)
# Example pedigree: founders A, B, E; C and D are A x B offspring;
# F and G are D x E offspring.
ped <- data.frame(
  id = c("A", "B", "C", "D", "E", "F", "G"),
  sire = c(NA, NA, "A", "A", NA, "D", "D"),
  dam = c(NA, NA, "B", "B", NA, "E", "E"),
  stringsAsFactors = FALSE
)
ped["gen"] <- findGeneration(ped$id, ped$sire, ped$dam)
ped$population <- getGVPopulation(ped, NULL)
# The same pedigree, but with id/sire/dam stored as factors instead of
# character vectors, to check calcFE() handles both encodings identically.
pedFactors <- data.frame(
  id = c("A", "B", "C", "D", "E", "F", "G"),
  sire = c(NA, NA, "A", "A", NA, "D", "D"),
  dam = c(NA, NA, "B", "B", NA, "E", "E"),
  stringsAsFactors = TRUE
)
pedFactors["gen"] <- findGeneration(pedFactors$id, pedFactors$sire,
                                    pedFactors$dam)
pedFactors$population <- getGVPopulation(pedFactors, NULL)
fe <- calcFE(ped)
feFactors <- calcFE(pedFactors)
## Prior to forcing the pedigree to have id, sire, and dam as character vectors
## inside calcFE, the two calculations above with ped (characters) and
## feFactors (factors) resulted 2.9090 and 2.000 respectively.
##
## Used example from Analysis of Founder Representation in Pedigrees: Founder
## Equivalents and Founder Genome Equivalents.
## Zoo Biology 8:111-123, (1989) by Robert C. Lacy
## He presented 2.91 as the answer, which was not precise enough for this
## specific comparison.
test_that("calcFE correctly calculates the number of founder equivalents in
          the pedigree", {
            expect_equal(fe, feFactors)
            expect_equal(fe, 2.9090909091)
          })
|
56fc12bbc3cabfc725e38d0e7138ece9a41289f9 | d09aea6359ada6c8a78ddeafc1989e93ba9294aa | /man/Data.Rd | 3680cd486a323239a1605a2f3ded1181ffab658e | [] | no_license | hyunsooseol/SimplyAgree | 4f28d8656f36be5d8a1e2e266c7ffb9c5c559c00 | 7507b271ce0528f92088d410d7d5f72d7c15d799 | refs/heads/master | 2023-05-30T21:45:57.725904 | 2021-06-22T00:13:59 | 2021-06-22T00:13:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 415 | rd | Data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{reps}
\alias{reps}
\title{reps}
\format{
A data frame with 20 rows with 3 variables
\describe{
\item{id}{Subject identifier}
\item{x}{X measurement}
\item{y}{Y measurement}
}
}
\usage{
reps
}
\description{
A fake data set from an agreement study in which both measures have replicates.
}
\keyword{datasets}
|
b6f556a1ef86e578ee81c5bd445ddde4de1c3d79 | f2064bd6c9fe0cdc1d22b8b149ad7e5acc64a9cd | /ui.r | 24101853ace4174cef3408469e38791119cb15d2 | [] | no_license | SaurabhWhadgar/anstephdb | 78453594ad6fd11e2ff3f26b02bacbd156eae042 | d7a2a09c457cff4d06e0f35e992bdcf479ce1af7 | refs/heads/master | 2022-12-13T23:36:40.439161 | 2020-08-30T08:41:10 | 2020-08-30T08:41:10 | 266,037,958 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,943 | r | ui.r | library(shiny)
library(DT)
library(shinydashboard)
library(shinydashboardPlus)
custom_db <- c("Ansteph Genome","Ansteph Protein")
dashboardPagePlus(skin = "black",
header = dashboardHeaderPlus(title = "An. stephensi Genome Resource Hub",titleWidth = 450,
enable_rightsidebar = FALSE, rightSidebarIcon = "gears",uiOutput("logoutbtn")
),#Header Close
sidebar = dashboardSidebar(sidebarMenu(
menuItem("Home", tabName = "dashboard", icon = icon("dashboard"),selected = TRUE),
menuItem("Download", tabName = "download", icon = icon("th")),
menuItem("Tools", tabName = "tool", icon = icon("th")),
menuSubItem("Blast",tabName = "shinyblast",icon = shiny::icon("angle-double-right"), selected = NULL),
menuSubItem("JBrowse",tabName = "jbrowse", icon = shiny::icon("angle-double-right"), selected = NULL),
menuSubItem("SequenceServer-Blast",tabName = "seqserver",icon = shiny::icon("angle-double-right"), selected = NULL),
menuItem("About Us", tabName = "aboutus", icon = icon("th")),
menuItem("Enquiry", tabName = "chat", icon = icon("th")))),#Sidebar Closs
body <- dashboardBody(shinyjs::useShinyjs(),
tabItems(
tabItem(tabName = "dashboard",
box(
solidHeader = FALSE,
title = "Looking For",background = NULL, width = 12,status = "danger",
footer = fluidRow(
column(width = 6,
descriptionBlock(
number = HTML("<a href='http://3.93.125.130'>JBrowse+Search</a>"),
number_color = "yellow",number_icon = "fas fa-align-justify",
header = HTML("<a href='http://3.93.125.130'>Biologist Version</a>"),
text = "Easy and Simple",
right_border = TRUE,
margin_bottom = FALSE)
),
column(width = 6,
descriptionBlock(
number = HTML("<a href='http://3.93.125.130'>All the Capabalities</a>"),
number_color = "black",
number_icon = "fa fa-arrows-alt",
header = HTML("<a href='http://3.93.125.130'>Data Analyst Version</a>"),
text = "Complex & Huge",
right_border = TRUE,
margin_bottom = TRUE
)#block2
)#column2
)#fluidrow
),#box
box(width = 12,fluidRow(
column(width = 4,
selectInput(inputId = 'searchby',label = "Search By",choices = c("Gene Name","Gene Function","Start Site","Pfam","InterPro","GO"))),#column1
column(width = 4,
textInput("querysearch","Query", placeholder = "hydroxyalse, t1.221, PF228800")),#column2
column(width = 4,title = "Search Database", status = "primary",collapsible = TRUE,HTML("<br>"),actionButton("submit","SUBMIT"))#column3
)),#box
box(width = 12,fluidRow(
column(width = 12,DT::dataTableOutput("queryresult"))
))),#box2
tabItem(tabName = "shinyblast",
fluidRow(infoBox("Step 1","Insert Query"),
infoBox("Step 2","Select Program - blastn or blastp"),
infoBox("Step 3","BLAST")
),
fluidRow(box(title = NULL, status = "info", solidHeader = FALSE, width = 12,
collapsible = TRUE,textAreaInput('query', 'Input sequence:', value = "", placeholder = "", height="200px"))),
fluidRow(box(width = 3,status = "info",selectInput("db", "Database:", choices=c(custom_db))),
(box(width = 3,status = "danger",selectInput("program", "Program:", choices=c("blastn","blastp","tblastn","blastx")))),
(box(width = 3,status = "warning",selectInput("eval", "e-value:", choices=c(1,0.001,1e-4,1e-5,1e-10)))),
(box(width = 3,status = "success",HTML("</br>"),actionButton("blast", "BLAST"),HTML("</br>"),HTML("</br>")))),
# textInput("text","Label",value="")
fluidRow(box(width = 12,status = "success",title = "Blast Result",DT::dataTableOutput("blastResults"))),
fluidRow(box(width = 12,status = "info",title = "Detailed Result",DT::dataTableOutput("clicked"))),
fluidRow(box(width = 12,status = "warning",title = "Show Alignment",verbatimTextOutput("alignment"))),
#this snippet generates a progress indicator for long BLASTs
# div(class = "busy",
# p("Calculation in progress.."),
# img(src="https://i.stack.imgur.com/8puiO.gif", height = 100, width = 100,align = "center")
# ), #Basic results output
),
tabItem(tabName = "tool",shinyjs::useShinyjs(),
box(width = 12,title = "Sequence Search",footer = "This will help to get sequence from Genome",status = "danger",solidHeader = TRUE,
collapsible = TRUE,
fluidRow(
box(width = 3,selectInput("chr",label = "Select Chromosome",choices = c("chr2","chr3","chrX"))),
box(width = 3,numericInput("start","START",value = 1,min = 1)),
box(width = 3,numericInput("end",label = "END",value = 2,min = 2)),
box(width = 3,HTML("</br>"),actionButton("getseq",label = "Fetch"))
),
fluidRow(box(width = 12,title = "Feteched Sequence",status = "success",textOutput("chrseq"))),
fluidRow(box(width = 12,title="Complementary Sequence",status = "success",textOutput("chrseqcomp"))),
fluidRow(box(width = 12,title="Reverse Complementry Sequence",status = "success",textOutput("chrseqrevcom"))),
)),
tabItem(tabName = "chat",
includeCSS("shinychat.css"),includeScript("sendOnEnter.js"),
fluidRow(box(width = 8,title="Chat Room",uiOutput("chat")),
box(width = 4,title="To help you please enter your name", textInput("user", "Your User Name:", value=""),
tags$hr(), h5("Connected Users"),# Create a spot for a dynamic UI containing the list of users.
uiOutput("userList"))),
fluidRow(box(width = 6,title="Type Text Here",textInput("entry", "")),
box(width = 6,title="To Send your message click Send button",HTML("</br>"),actionButton("send", "Send")))
# div(
# # Setup custom Bootstrap elements here to define a new layout
# class = "container-fluid",
# div(class = "row-fluid",
# # Set the page title
# tags$head(tags$title("ShinyChat")),
#
# # Create the header
# div(class="span6", style="padding: 10px 0px;",
# h1("ShinyChat"),
# h4("Hipper than IRC...")
# ), div(class="span6", id="play-nice",
# "IP Addresses are logged... be a decent human being."
# ))),
# div(
# class = "row-fluid",
# mainPanel(
# Create a spot for a dynamic UI containing the chat contents.
# uiOutput("chat"),
# fluidRow(box(width = 12,title="Reverse Complementry Sequence",status = "success",textInput("entry", ""))),
# fluidRow(box(width = 12,title="Reverse Complementry Sequence",status = "success",actionButton("send", "Send"))),
# Create the bottom bar to allow users to chat.
# fluidRow(
# div(class="span10",
#
# ),
# div(class="span2 center",
# actionButton("send", "Send")
# )
# )
# ),
# The right sidebar
# sidebarPanel(
# # Let the user define his/her own ID
# textInput("user", "Your User ID:", value=""),
# tags$hr(),
# h5("Connected Users"),
# # Create a spot for a dynamic UI containing the list of users.
# uiOutput("userList"),
# tags$hr(),
# helpText(HTML("<p>Built using R & <a href = \"http://rstudio.com/shiny/\">Shiny</a>.<p>Source code available <a href =\"https://github.com/trestletech/ShinyChat\">on GitHub</a>."))
# )
),
tabItem(tabName = "jbrowse",
fluidRow(column(12, htmlOutput("jb_frame")))),
tabItem(tabName = "seqserver",
fluidRow(column(12, htmlOutput("seqserver_frame")))),
tabItem(tabName = "download",
fluidRow(column(12, htmlOutput("download_frame"))))
)),#dashboard
dashboardFooter(
left_text = "Database is maintained By IBAB",
right_text = "Insititue of Bioinformatics & Applied Biotechnolgy, 2020"
),#footer
rightsidebar = rightSidebar(
background = "dark",
rightSidebarTabContent(
id = 1,
title = "Tab 1",
icon = "desktop",
active = TRUE,
sliderInput(
"obs",
"Number of observations:",
min = 0, max = 1000, value = 500
)
),
rightSidebarTabContent(
id = 2,
title = "Tab 2",
textInput("caption", "Caption", "Data Summary")
),
rightSidebarTabContent(
id = 3,
icon = "paint-brush",
title = "Tab 3",
numericInput("obs", "Observations:", 10, min = 1, max = 100)
)
),
title = "IBAB"
)
#“blue”, “black”, “purple”, “green”, “red”, “yellow” |
af4ee0e613ac7b3bd2d207cef8c06cbf1bb6fd6e | 982f39c84763cbf209c3a2c94ec72a5a5b0c75eb | /analyses/1_exploration/R_scripts/4_get_climate_dists.R | 92543c231f57f109ab0f678a6308bb82d78e145a | [] | no_license | mllewis/conceptviz | 7c2078688d808d50f4532433ff5ffec24f02d66b | 36b8180d8c14e9b71f814434cc07ac34e25a2d12 | refs/heads/master | 2021-07-02T08:53:29.466672 | 2020-08-06T15:14:38 | 2020-08-06T15:14:38 | 93,209,750 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 824 | r | 4_get_climate_dists.R | ### use climate dists and trade data to look at pairwise relations ####
library(tidyverse)
library(forcats)
library(knitr)
library(countrycode)
library(stringr)
# Annual precipitation per country, keyed by ISO3 code.
precip <- read_csv( "../../data/supplementary_data/cultural_sim_measures/geo/country_precip.csv") %>%
  select(ISO_3DIGIT, Annual_precip) %>%
  rename(precip = Annual_precip)
# Annual mean temperature per country, keyed by ISO3 code.
temp <- read_csv( "../../data/supplementary_data/cultural_sim_measures/geo/country_temp.csv") %>%
  select(ISO_3DIGIT, Annual_temp) %>%
  rename(temp = Annual_temp)
# Join the two climate tables and convert ISO3 codes to ISO2 country codes.
climate_data <- precip %>%
  left_join(temp) %>%
  mutate(country_code = countrycode(ISO_3DIGIT, "iso3c", "iso2c")) %>%
  select(country_code, temp, precip ) %>%
  mutate(country_code = as.factor(country_code))
# NOTE(review): the output filename contains the typo "climiate" — downstream
# readers reference this exact name, so it is kept as-is.
write_csv(climate_data, "../../data/supplementary_data/cultural_sim_measures/geo/all_climiate_data.csv")
385594dd71799f7d52f25b856071754d618c1dc8 | f439a076bc3fcac2c8d7eb72e69dc8d24a00b263 | /Unit 4 Trees/D2Hawkeye.R | c3b7fc846bb296f9b2225b8a3cac53527549dc1b | [] | no_license | jakehawk34/MIT-Analytics | 73f9afb0cbfbbd8202e415f0c50c8e638aa76db1 | daa2ca2eca44ba6c74ba5773d992f68e8c775b90 | refs/heads/main | 2023-05-07T13:54:40.796512 | 2021-05-21T00:31:11 | 2021-05-21T00:31:11 | 344,290,207 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,148 | r | D2Hawkeye.R | # Unit 4
# Keeping an Eye on Healthcare Costs: The D2Hawkeye Story
# This is a story of D2Hawkeye, a medical data mining company located in Waltham, Massachusetts.
# It starts with medical claims that consist of diagnoses, procedures, and drugs.
# These medical claims are then processed via process of aggregation, cleaning, and normalization.
# This data then enters secure databases on which predictive models are applied.
# The output of predictive models are specific reports that give insight to the various questions that D2Hawkeye aspires to answer.
# D2Hawkeye had many different types of clients. The most important were third party administrators of medical claims.
# To analyze the data, the company used what we call a pre-analytics approach.
# This was based on the human judgment of physicians who manually analyze patient histories and developed medical rules.
# Of course, this involved human judgment, utilized a limited set of data, it was often costly, and somewhat inefficient.
# The key question we analyze in this lecture is "Can we use analytics instead?"
# Health care industry is data-rich, but data may be hard to access.
# Claims data, requests for reimbursement submitted to insurance companies or state-provided insurance
# by doctors, hospitals and pharmacies.
# Eligibility information and demographic information are also good sources.
# Claims data is rich, structured, high dimension. However, this collection of data does not
# capture all aspects of a person's treatment or health. Many things must be inferred.
# Observation period from 2001-2003. Results period in 2004. Patients with at least 10 months of data in each period were included.
# 13,000 diagnoses --> 217 diagnosis groups
# 22,000 procedures --> 213 procedure groups
# 45,000 prescription drugs --> 189 therapeutic groups
# In addition to the defined groups, we also defined in collaboration with medical doctors, 269 medically-defined rules.
# Interactions between illnesses, interactions between illness and age, noncompliance treatment, and illness severity.
# Five buckets defined with 20% of all costs in each bucket.
# Partitions were from 0 to $3,000, $3,000 to $8,000, $8,000 to $19,000, $19,000 to $55,0000, and >$55,000
# 78% of all patients were in the bucket from 0 to $3,000
# Bucket 1 was low risk, Bucket 2 is emerging risk, Bucket 3 is moderate risk, Bucket 4 is high risk, and Bucket 5 is very high risk.
# Error Measures
# In D2Hawkeye, failing to classify a high risk patient was much more costly than failing to classify a low risk patient correctly.
# To account for this, a penalty error used asymmetric penalties in penalty matrix.
# Baseline: predict that the cost in the next period will be the same as the cost in the current period.
# Baseline had an accuracy of 75% and a penalty error of 0.56.
# Multi-class classification
# Costs were the most important for determining splits in the beginning of the tree.
# So let us give some examples of bucket five.
# 1. The patient is under 35 years old, he has between 3,300 and 3,900 in claims, coronary artery disease as a diagnosis, but no office visits in the last year.
# 2. Claims between $3,900 and $43,000 with at least $8,000 paid in the last 12 months, $4,300 in pharmacy claims, and acute cost profile and cancer diagnosis.
# 3. More than $58,000 in claims, but at least $50,000 paid in the last 12 months, but not an acute profile.
# The observations represent a 1% random sample of Medicare beneficiaries, limited to those still alive at the end of 2008.
# Our independent variables are from 2008, and we will be predicting cost in 2009.
# Load the Medicare claims sample; character columns become factors.
Claims = read.csv("ClaimsData.csv", stringsAsFactors = TRUE)
str(Claims)
# Proportion of patients in each 2009 cost bucket.
table(Claims$bucket2009) / nrow(Claims)
library(caTools)
set.seed(88)
# 60/40 train/test split, stratified on the 2009 outcome bucket.
spl = sample.split(Claims$bucket2009, SplitRatio = 0.6)
ClaimsTrain = subset(Claims, spl == TRUE)
ClaimsTest = subset(Claims, spl == FALSE)
# Quick Question 6
mean(ClaimsTrain$age)
mean(ClaimsTrain$diabetes)
# Baseline method
# Baseline: predict next year's bucket equals this year's bucket (cost repeats).
table(ClaimsTest$bucket2009, ClaimsTest$bucket2008)
# Accuracy is sum of the diagonal divided by all observations
(110138 + 10721 + 2774 + 1539 + 104) / nrow(ClaimsTest)
# Penalty error and penalty matrix; actual outcomes on the left, predicted outcomes on top.
# Under-predicting a high-cost patient is penalized more heavily (left column)
# than over-predicting a low-cost one.
PenaltyMatrix = matrix(c(0, 1, 2, 3, 4,
                         2, 0, 1, 2, 3,
                         4, 2, 0, 1, 2,
                         6, 4, 2, 0, 1,
                         8, 6, 4, 2, 0), byrow = TRUE, nrow = 5)
PenaltyMatrix
# Compute the penalty error of the baseline method by multiplying the penalty matrix by the classification matrix
as.matrix(table(ClaimsTest$bucket2009, ClaimsTest$bucket2008)) * PenaltyMatrix
sum(as.matrix(table(ClaimsTest$bucket2009, ClaimsTest$bucket2008)) * PenaltyMatrix) / nrow(ClaimsTest)
# Our goal will be to create a CART model that has an accuracy higher than 68% and a penalty error lower than 0.74.
# Quick Question 7
table(ClaimsTest$bucket2009) / nrow(ClaimsTest)
# Penalty incurred if we always predicted bucket 1 (column 1 of the matrix).
PenaltyMatrix[,1] * table(ClaimsTest$bucket2009)
sum(PenaltyMatrix[,1] * table(ClaimsTest$bucket2009)) / nrow(ClaimsTest)
library(rpart)
library(rpart.plot)
# Build the CART model
# Very small cp keeps splits with tiny complexity improvements (large dataset).
ClaimsTree = rpart(bucket2009 ~ age + arthritis + alzheimers + cancer + copd + depression + diabetes + heart.failure + ihd + kidney + osteoporosis + stroke + bucket2008 + reimbursement2008, data=ClaimsTrain, method="class", cp=0.00005)
prp(ClaimsTree)
# Make predictions on the test set
PredictTest = predict(ClaimsTree, newdata=ClaimsTest, type="class")
table(ClaimsTest$bucket2009, PredictTest)
# Accuracy
(114141 + 16102 + 118 + 201 + 0) / nrow(ClaimsTest)
# Penalty error
as.matrix(table(ClaimsTest$bucket2009, PredictTest)) * PenaltyMatrix
sum(as.matrix(table(ClaimsTest$bucket2009, PredictTest)) * PenaltyMatrix) / nrow(ClaimsTest)
# So while we increased the accuracy, the penalty error also went up. Why?
# By default, rpart will try to maximize the overall accuracy, and every type of error is seen as having a penalty of one.
# Our CART model predicts 3, 4, and 5 so rarely because there are very few observations in these classes.
# Add loss to the CART model and calculate new accuracy, penalty error values
# parms=list(loss=...) makes rpart minimize the asymmetric penalty directly.
ClaimsTree = rpart(bucket2009 ~ age + arthritis + alzheimers + cancer + copd + depression + diabetes + heart.failure + ihd + kidney + osteoporosis + stroke + bucket2008 + reimbursement2008, data=ClaimsTrain, method="class", cp=0.00005, parms=list(loss=PenaltyMatrix))
PredictTest = predict(ClaimsTree, newdata=ClaimsTest, type="class")
table(ClaimsTest$bucket2009, PredictTest)
(94310 + 18942 + 4692 + 636 + 2) / nrow(ClaimsTest)
as.matrix(table(ClaimsTest$bucket2009, PredictTest)) * PenaltyMatrix
sum(as.matrix(table(ClaimsTest$bucket2009, PredictTest)) * PenaltyMatrix) / nrow(ClaimsTest)
# Our accuracy (0.647) is now lower than the baseline model, but so is the penalty error (0.642)
# According to the penalty matrix, some of the worst types of errors are to predict bucket 1 when the actual cost bucket is higher.
# Therefore, the model with the penalty matrix predicted bucket 1 less frequently than the baseline model.
# CONCLUSIONS
# So we first observe that the overall accuracy of the method regarding the percentage that it accurately
# predicts is 80%, compared to 75% of the baseline. But notice that this is done in an interesting way.
# For bucket one patients, the two models are equivalent. But of course this suggests the idea
# that healthy people stay healthy, which is the idea of the baseline model. The cost repeats is valid in the data.
# But then for buckets two to five, notice that the accuracy increases substantially from 31% to 60%-- it doubles-- from 21% to 53%--
# more than doubles-- and from 19% to 39%-- doubles. There's an improvement from 23% to 30%, not as big as before,
# but there is indeed an improvement for bucket five. But notice the improvement on the penalty from 0.56 to 0.52 overall.
# A small improvement in bucket one, but a significant improvement as we increase on the buckets. For example, here for bucket five,
# the penalty error decreases from 1.88 to 1.01, a substantial improvement.
|
ee878bbbcf648cbe836804b84c01fb79f17dcce8 | 69cf4ab4c19c06164df43eb2fb11393ad5b79567 | /initials_brownlow_total.R | f1cf2d144542dd8e7eb74dc192660b099963fc2b | [] | no_license | ApacheStark/UselessAFLStats | de9ea99c8ede457e6b745f1072a480198d052b1b | 610c1a952ae998f08576ddf9124542e7a4646ce1 | refs/heads/master | 2023-01-23T15:51:00.737948 | 2020-12-04T08:19:58 | 2020-12-04T08:19:58 | 307,062,565 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,762 | r | initials_brownlow_total.R | library('fitzRoy')
library(tidyverse)

# Totals Brownlow votes by the (first-name initial, surname initial) pair of
# every AFL player, then lists the players behind the most and least
# vote-productive initial pairs. Saves all tables to one RDS file.

# Full AFL Tables player-match statistics.
afttable <- fitzRoy::get_afltables_stats()
afttable_df <- afttable %>% as_tibble()

# One row per vote-scoring (player, match), with upper-cased initials added.
brownlow_votes <- afttable_df %>%
  select(First.name,Surname,Brownlow.Votes) %>%
  filter(Brownlow.Votes>0) %>%
  mutate(First.init = toupper(str_sub(First.name, 1, 1)),
         Second.init = toupper(str_sub(Surname, 1, 1)))

# Career vote totals per initial pair, most votes first.
initials_votes <- brownlow_votes %>%
  group_by(First.init, Second.init) %>%
  summarise(Total.votes = sum(Brownlow.Votes, na.rm = TRUE)) %>%
  arrange(desc(Total.votes))

# Same table, fewest votes first.
bottom_initials <- initials_votes %>%
  arrange(Total.votes)

# Per-player vote totals for a single initial pair.
# Fix: the original filtered an undefined object `afttable_df_sub`; the
# initial columns only exist on `brownlow_votes`, which is what we subset.
names_for_initials <- function(first, second) {
  brownlow_votes %>%
    filter(First.init == first & Second.init == second) %>%
    group_by(First.name, Surname) %>%
    summarise(Total.votes = sum(Brownlow.Votes, na.rm = T)) %>%
    arrange(desc(Total.votes))
}

top_names <- names_for_initials(initials_votes$First.init[1], initials_votes$Second.init[1])
second_names <- names_for_initials(initials_votes$First.init[2], initials_votes$Second.init[2])
third_names <- names_for_initials(initials_votes$First.init[3], initials_votes$Second.init[3])

# Players holding the seven rarest vote-scoring initial pairs; the semi_join
# replaces the original fourteen-clause filter.
bottom_names <- brownlow_votes %>%
  semi_join(head(bottom_initials, 7), by = c("First.init", "Second.init")) %>%
  group_by(First.name, Surname) %>%
  summarise(Total.votes = sum(Brownlow.Votes, na.rm = T)) %>%
  arrange(desc(Total.votes))

data_list <- list(
  'Votes_Data' = brownlow_votes,
  'Top Initials' = initials_votes,
  'Bottom Initials' = bottom_initials,
  'Top Names' = top_names,
  '2nd Top Names'= second_names,
  '3rd Top Names'= third_names,
  '1 Vote Initials' = bottom_names
)

saveRDS(data_list,
        'OUT/initials_brownlow/brownlow_initial_votes.RDS')
98bc1a829d844c29e384705629b46df5484865fc | 5c0b70c38a3aaa810aa0b98d129aa138631c3b20 | /cachematrix.R | 84d201827fb1798c7d91ab26b16e518557249830 | [] | no_license | ncrowley13/ProgrammingAssignment2 | 768999e944e1e0dff0bbe0a3407937bb7e200e15 | 89b53a86e03b5649bf015b5a252f9fc8760324d0 | refs/heads/master | 2021-01-17T08:51:29.622571 | 2015-06-20T15:22:47 | 2015-06-20T15:22:47 | 37,751,071 | 0 | 0 | null | 2015-06-19T23:44:32 | 2015-06-19T23:44:32 | null | UTF-8 | R | false | false | 753 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
## this main function stores a list of subset functions
## that act on an input matrix
# Wraps a matrix in a list of closures that can memoise its inverse.
#
# x: the matrix to wrap (defaults to an empty 1x1 NA matrix).
# Returns a list with set/get/setinverse/getinverse accessors; the cached
# inverse is NULL until cacheSolve() stores one, and is invalidated by set().
makeCacheMatrix <- function(x = matrix()) {
  inverse_cache <- NULL
  # Replace the stored matrix and drop any stale cached inverse.
  set <- function(y) {
    x <<- y
    inverse_cache <<- NULL
  }
  # Read back the stored matrix.
  get <- function() {
    x
  }
  # Store / retrieve the memoised inverse.
  setinverse <- function(inv) {
    inverse_cache <<- inv
  }
  getinverse <- function() {
    inverse_cache
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## This function calculates the inverse of a matrix
##or calls it from the cache
# Returns the inverse of the special "cache matrix" created by
# makeCacheMatrix(), computing it at most once and reusing the cached value
# on subsequent calls.
#
# x:   a cache-matrix object (list with get/getinverse/setinverse closures).
# ...: extra arguments forwarded to solve().
# Returns the inverse of the wrapped matrix.
#
# Fixes: removed the unusable default `x = solve()` (evaluating solve() with
# no arguments always errors, so no working caller relied on it) and renamed
# the local variable that previously shadowed base::solve.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  # Cached value available: skip the potentially expensive solve().
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)
  x$setinverse(inv)
  inv
}
|
a80e790acd68c38e8a70c8adf07f815520bdc785 | d80a8ebc5dd9b2a9df78384dff096f50b450f9d1 | /R/Listener.R | 36e75480bd2635807ea635894f1dc4803765edad | [] | no_license | cran/beakr | 80fd36eb8ecbb6df5656d4a0c9fe4f44b10803c2 | 2355ab4fab157de08afa1057b1f05ae265aa1042 | refs/heads/master | 2021-07-15T18:37:19.962966 | 2021-04-06T16:20:02 | 2021-04-06T16:20:02 | 239,598,113 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 798 | r | Listener.R | #' @export
#' @title Listener class
#'
#' @description
#' A \code{Listener} object provides a simple, programmatically
#' controlled HTTP protocol listener.
#'
#' @usage NULL
#'
#' @format NULL
#'
#' @section Fields:
#'
#' \describe{
#'   \item{\code{FUN}}{
#'     The callback function invoked for this listener's event.
#'   }
#'   \item{\code{event}}{
#'     The name of the event this listener responds to.
#'   }
#' }
#'
#' @section Methods:
#'
#' \describe{
#'   \item{\code{initialize(event, FUN, ...)}}{
#'     Stores the event name and callback on the instance; additional
#'     arguments are accepted but currently ignored.
#'   }
#' }
#'
#' @seealso \code{\link{Router}} and \code{\link{Error}}
#'
Listener <-
  R6::R6Class(
    classname = "Listener",
    public = list(
      FUN = NULL,   # callback invoked for the event
      event = NULL, # event name this listener is registered for
      initialize = function(event, FUN, ...) {
        # `...` is accepted for call-site flexibility but not used here.
        self$FUN = FUN
        self$event = event
      }
    )
  )
|
ebe4366a444ecbd187e1f416fd2744ac7ec81cc2 | e164bc5a3fbd19e230bec5a0cd6aad6a9ffc0bc1 | /munge/01-rdyads.R | 9ddf95ec42ed5551c099e0b4f69cc9d4a7cd0b67 | [] | no_license | jeromyanglim/anglim-redcards-2014 | 4c76b7b870d0dbaa0ded5e1d34203c28ae33c35a | ae973cea310917d7319ee06f6f817d441af5a51b | refs/heads/master | 2016-09-06T15:02:30.662115 | 2014-06-21T14:23:27 | 2014-06-21T14:23:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,919 | r | 01-rdyads.R | # Example preprocessing script.
v <- list()
# Column sets reused below when carving player / referee / country tables
# out of the dyad-level data.
v$player_variables <- c("PlayerID", "Player", "Club", "League_Country", "LeagueID",
                        "SkinCol", "PhotoID", "PlayerHeightCM", "PlayerBirthd", "BirthD",
                        "BirthM", "BirthY", "PositionID", "Position", "Position_Detailed")
v$ref_variables <- c("RefereeID", "RefCountryID", "RCperRef", "TotMatPerRef", "meanIAT_RefCountry",
                     "nIAT", "seIAT", "meanExp_RefCountry", "nExp", "seExp")
v$iat_measures <- c("meanIAT_RefCountry", "nIAT", "seIAT", "meanExp_RefCountry", "nExp", "seExp")
# Fix variable codings
# These columns arrive as character; coerce to numeric
# (non-numeric strings silently become NA).
v$make_numeric <- c("BirthD", "meanIAT_RefCountry", "nIAT", "seIAT", "meanExp_RefCountry", "nExp", "seExp")
for (variable in v$make_numeric) {
rdyads[,variable] <- as.numeric(rdyads[,variable])
}
# Empty position strings are treated as missing.
rdyads[rdyads$Position == "", 'Position'] <- NA
# Player and dyad variables
rdyads$SkinCol_4or5 <- as.numeric(rdyads$SkinCol == 4 | rdyads$SkinCol == 5) # binary coding
rdyads$SkinCol_0to1 <- (rdyads$SkinCol -1) / 4 # continuous and rescaled
# create player, referee, and referee country data frames
# One row per player / referee / referee country (first occurrence kept).
rplayers <- rdyads[!duplicated(rdyads$PlayerID), v$player_variables]
rrefs <- rdyads[!duplicated(rdyads$RefereeID), v$ref_variables]
rcountries <- rrefs[!duplicated(rrefs$RefCountryID), c('RefCountryID', v$iat_measures)]
# merge in totals to player
# Sum `variable` over `dyads` within each level of `id`, then left-join
# the totals onto `data` as a new "<variable>Sum" column (merge is done
# without re-sorting by the key).
merge_sum <- function(variable='RedCards', id='PlayerID', data=rplayers, dyads=rdyads) {
  totals <- aggregate(dyads[, variable], list(dyads[, id]), sum)
  totals <- setNames(totals, c(id, paste0(variable, 'Sum')))
  merge(data, totals, all.x = TRUE, sort = FALSE)
}
# Attach per-player / per-referee / per-country totals of each count variable.
for (variable in c("Matches", "Goals", "YellowCards", "YellowRed", "RedCards")) {
rplayers <- merge_sum(variable, 'PlayerID', rplayers, rdyads)
rrefs <- merge_sum(variable, 'RefereeID', rrefs, rdyads)
rcountries <- merge_sum(variable, 'RefCountryID', rcountries, rdyads)
}
# Add a "<variable>Prop" column to `data`: the elementwise ratio of
# `variable` to `divisor`.
create_prop <- function(data=rdyads, variable='Goals', divisor='Matches') {
  prop_col <- paste0(variable, 'Prop')
  data[, prop_col] <- data[, variable] / data[, divisor]
  data
}
# For each goal/card count, add per-match proportions: at the dyad level
# (per match count) and at the player/referee/country level (per summed counts).
for (variable in c('Goals', 'YellowCards', 'YellowRed', 'RedCards')) {
rdyads <- create_prop(rdyads, variable, 'Matches')
variable_sum <- paste0(variable, 'Sum')
rplayers <- create_prop(rplayers, variable_sum, 'MatchesSum')
rrefs <- create_prop(rrefs, variable_sum, 'MatchesSum')
rcountries <- create_prop(rcountries, variable_sum, 'MatchesSum')
}
# create modified predictors
# log of total matches as an experience/exposure proxy
rplayers$MatchesSum_log <- log(rplayers$MatchesSum)
rrefs$MatchesSum_log <- log(rrefs$MatchesSum)
# create interaction variables
# Add the elementwise product of columns `x1` and `x2` to `data`,
# stored under the name "<x1><separator><x2>".
make_interaction <- function(x1, x2, data=rdyads, separator="BY") {
  product_col <- paste0(x1, separator, x2)
  data[, product_col] <- data[, x1] * data[, x2]
  data
}
# SkinCol x referee-country IAT interaction, named "SkinColBYmeanIAT_RefCountry"
rdyads <- make_interaction('SkinCol', 'meanIAT_RefCountry')
|
f0d67de9b8bad6b16a2b4280ee720b6f95603823 | e96b48fe5cf40368bb37415eaffdb0314f52b70f | /tests/testthat/test-function-getInspectionNumbers.R | 3c1b977f92012fc481cc6f7cdccfc7fdaad0a0db | [
"MIT"
] | permissive | KWB-R/kwb.en13508.2 | 53a313cd45e018ecfefbb3a03b9874fa816ec7b6 | 94d17a473cd2a5d6a1a15dd01149195215060fed | refs/heads/master | 2022-12-11T07:05:49.095074 | 2019-09-09T13:47:30 | 2019-09-09T13:47:30 | 131,375,390 | 0 | 0 | MIT | 2023-09-14T08:28:56 | 2018-04-28T05:18:17 | R | UTF-8 | R | false | false | 1,022 | r | test-function-getInspectionNumbers.R | #
# This test file has been generated by kwb.test::create_test_files()
#
# Auto-generated characterization tests: each expect_error() pins down
# that the call fails for a given invalid argument combination; the
# comment under each call quotes the error message observed when the
# tests were generated.
test_that("getInspectionNumbers() works", {
  expect_error(
    kwb.en13508.2:::getInspectionNumbers(indices.C = 1, indices.B01 = 1)
    # argument "indices.B" is missing, with no default
  )
  expect_error(
    kwb.en13508.2:::getInspectionNumbers(indices.C = 1, indices.B01 = list(key = c("a", "b"), value = 1:2))
    # (list) object cannot be coerced to type 'double'
  )
  expect_error(
    kwb.en13508.2:::getInspectionNumbers(indices.C = 1:2, indices.B01 = list(key = c("a", "b"), value = 1:2))
    # (list) object cannot be coerced to type 'integer'
  )
  expect_error(
    kwb.en13508.2:::getInspectionNumbers(indices.C = "a", indices.B01 = as.POSIXct("2018-06-03 23:50:00"))
    # character string is not in a standard unambiguous format
  )
  expect_error(
    kwb.en13508.2:::getInspectionNumbers(indices.C = TRUE, indices.B01 = list(key = c("a", "b"), value = 1:2))
    # (list) object cannot be coerced to type 'logical'
  )
})
|
b653a52d757805f7744f0cff60fdafd6b5ee2943 | 357f1a79af257d1b954eecd5ab724aeb45a75889 | /man/GETVotacoes.Rd | 5961295c369053324178703dc347ab139ec782a3 | [] | no_license | matfmc/congressoR | 208c878cce6aa0f5211182b46a3ac49883d6f494 | f06e2517c140bd7bf613484487a34d34f4ddce3e | refs/heads/master | 2022-11-23T16:50:20.827939 | 2020-07-23T19:38:32 | 2020-07-23T19:38:32 | 281,163,119 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,946 | rd | GETVotacoes.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{GETVotacoes}
\alias{GETVotacoes}
\title{Função que retorna Lista das votações da Câmara}
\usage{
GETVotacoes(
id = NULL,
idProposicao = NULL,
idEvento = NULL,
idOrgao = NULL,
dataInicio = NULL,
dataFim = NULL,
pagina = NULL,
itens = NULL,
ordem = NULL,
ordenarPor = NULL
)
}
\arguments{
\item{id}{Um ou mais identificador(es) alfanuméricos de votação, separados por vírgulas, para que seja(m) listado(s) dados sobre uma ou mais votações específicas.}
\item{idProposicao}{Um ou mais identificador(es) numéricos de proposições, que podem ser obtidos por meio do recurso /proposicoes. Se presente, listará as votações que tiveram a(s) proposição(ções) como objeto de votação ou que afetaram as proposições listadas.}
\item{idEvento}{Identificador de um ou mais evento(s) realizado(s) na Câmara, separados por vírgula, nos quais tenham sido realizadas as votações a serem listadas. Os identificadores podem ser obtidos por meio do recurso /eventos. Somente os eventos deliberativos podem ter votações. Os eventos podem ter ocorrido fora do intervalo de tempo padrão ou definido por dataInicio e/ou dataFim.}
\item{idOrgao}{Um ou mais identificador(es) numéricos de órgãos da Câmara, separados por vírgulas. Se presente, serão retornadas somente votações dos órgãos enumerados. Os identificadores existentes podem ser obtidos por meio do recurso /orgaos.}
\item{dataInicio}{Data em formato AAAA-MM-DD para início do intervalo de tempo no qual tenham sido realizadas as votações a serem listadas. Se usado sozinho, esse parâmetro faz com que sejam retornadas votações ocorridas dessa data até o fim do mesmo ano. Se usado com dataFim, as duas datas devem ser de um mesmo ano.}
\item{dataFim}{Data em formato AAAA-MM-DD que define o fim do intervalo de tempo no qual tenham sido realizadas as votações a serem listadas. Se usado sozinho, esse parâmetro faz com que sejam retornadas todas as votações ocorridas desde 1º de janeiro do mesmo ano até esta data. Se usado com dataInicio, é preciso que as duas datas sejam de um mesmo ano.}
\item{pagina}{Número da página de resultados, a partir de 1, que se deseja obter com a requisição, contendo o número de itens definido pelo parâmetro itens. Se omitido, assume o valor 1.}
\item{itens}{Número máximo de itens na página que se deseja obter com esta requisição. O valor padrão e máximo para este endpoint é 200, e valores maiores serão ignorados.}
\item{ordem}{O sentido da ordenação: asc para A a Z ou 0 a 9, e desc para Z a A ou 9 a 0.}
\item{ordenarPor}{Nome do campo pelo qual a lista será ordenada. Pode ser id, idOrgao, siglaOrgao, idEvento, idProposicao, data, dataHoraRegistro ou idProposicaoObjeto.}
}
\description{
Retorna uma lista de informações básicas sobre as votações ocorridas em eventos dos diversos órgãos da Câmara.
Se não forem passados parâmetros que delimitem o intervalo de tempo da pesquisa, são retornados dados sobre todas as votações ocorridas nos últimos 30 dias, em eventos de todos os órgãos.
Os parâmetros de data permitem estender o período, mas por enquanto é necessário que as duas datas sejam de um mesmo ano. Quando apenas uma delas está presente, são retornadas somente as votações ocorridas no mesmo ano, antes de dataFim ou após dataInicio.
Também é possível filtrar a listagem por identificadores de órgãos da Câmara, de proposições e de eventos.
Quando não há identificação da proposição que foi efetivamente votada, é preciso consultar o endpoint /votacoes/{id} para obter uma lista de proposições das quais uma pode ter sido o objeto da votação.
Para mais informações sobre o uso dos endpoints de votações, veja a página de tutorial do Portal de Dados Abertos.
}
\examples{
GETVotacoes()
}
\keyword{votações}
|
b32dc63d9bcb0b0f38abf4abf6b8d41a93782ab1 | 2ee62142697fde94a911838a88d348df60e918e6 | /man/is_outgroup.Rd | cefc5c50901485901fbdf2283d22bfbe5e32d68b | [] | no_license | andrewparkermorgan/mouser | f9f8b11b127e30d8314e844397de1ed5dd20c2e5 | 16ead9859e8f2c1021fb4eb14b81b65531084bec | refs/heads/master | 2022-05-19T02:52:22.739616 | 2022-05-11T03:53:39 | 2022-05-11T03:53:39 | 62,641,240 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 424 | rd | is_outgroup.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colours.R
\name{is_outgroup}
\alias{is_outgroup}
\title{Test if the listed taxa are outgroups with respect to Mus musculus}
\usage{
is_outgroup(x, ..)
}
\arguments{
\item{x}{a vector of values to test (will be coerced to character)}
}
\value{
a logical vector
}
\description{
Test if the listed taxa are outgroups with respect to Mus musculus
}
|
5493a80666761378e6e1f69b454f7db36edfc040 | 00a50d686281a4a7dc22c4322c65808663b9a193 | /R Code/Mz_regressions/MZ_regressions_A.R | 7a500da3f3d6f7bb67b16bd62f5e2f8a46e77a79 | [] | no_license | kevintikvic/RealizedVol_From_10K_Sentiment | b61bc58a0a6513fe02c84d808a9260e2d1e38eab | db0d1f0c8b06eea6218a1b104e0df55a00d6accf | refs/heads/main | 2023-04-09T16:11:11.740392 | 2021-04-28T18:15:09 | 2021-04-28T18:15:09 | 348,022,000 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,445 | r | MZ_regressions_A.R |
# PREFIX ------------------------------------------------------------------
# --------------------------------------------------------------------------------------- #
# #
# Author: Kevin Tikvic #
# Filename: MZ_regressions_A #
# Description: 1) load LHS and RHS for variant A (2013-2017 all-in-one regressions) #
# 2) regress LHS on RHS using 7 different weighting schemes #
# 3) export nice stargazer latex table for the different models #
# #
# Date (last updated): July 23th, 2018 #
# #
# --------------------------------------------------------------------------------------- #
# Libraries / Settings ----------------------------------------------------
# NOTE(review): rm(list=ls()) plus a hard-coded setwd() make this script
# machine-specific. The pipes below (%>%, filter, select) also need
# dplyr/tidyverse attached, which this script does not load itself --
# presumably done in the session; confirm.
rm(list=ls()) # clear environment
gc()
options(scipen = 999)
library(stargazer)
# setwd("/Volumes/LaCie/LM_data/")
# setwd("/Users/kevin/Desktop/lm_testdata")
setwd("/Users/kevin/Desktop/lm_testdata/")
# load data
# Provides MZ_LHS and MZ_RHS used in the regressions below.
load("./regressions/A/A_MZ_inputs.RData")
# extract the RHS that all models will have in common:
common.rhs <- MZ_RHS[1:159] %>% cbind(., FIN = MZ_RHS$FIN) %>% as_tibble()
# seems I forgot size variable; load via DB5:
load("./DB5_316x46483.RData")
# Append size_ta (firm-size measure) for filings from 2013 onwards.
common.rhs <- common.rhs %>% cbind(.,
                                   (DB5 %>%
                                      filter(repyear >= 2013) %>%
                                      select(size_ta))) %>%
  as_tibble()
# note: this DF contains both GARCH and GJR variables, we will use either or
# A(1)-A(8): one model pair per sentiment-weighting scheme ------------------
# The eight variants differ ONLY in which five sentiment measures
# (NEG_SENT, POS_SENT, ASSERT, UNCERT, LITI) enter the design matrix;
# each scheme is identified by the suffix of its columns in MZ_RHS.
# For every scheme two OLS regressions are estimated, swapping only the
# time-series volatility forecast: A<i>.GARCH uses the GARCH forecast
# (log_garch), A<i>.GJR the GJR-GARCH forecast (log_gjr).
# (This replaces eight copy-pasted, otherwise identical sections.)
sentiment_suffixes <- c("_VIBTW",    # A1
                        "_TFIDF",    # A2
                        "_RFIDF",    # A3
                        "WFIDF_1P",  # A4: wf-idf, 1 + log variant
                        "WFIDF_P1",  # A5: wf-idf, log(1 + .) variant
                        "WF_1P",     # A6: wf, 1 + log variant
                        "WF_P1",     # A7: wf, log(1 + .) variant
                        "_TFMAX")    # A8

# Assemble the full design matrix for one sentiment block and one
# volatility forecast. Columns 8:159 of common.rhs are the dummy controls.
build_rhs <- function(sentiment_block, ts_forecast) {
  cbind(TS_FC = ts_forecast,
        sentiment_block,
        GFS = common.rhs$log_GFS,
        FIN = common.rhs$FIN,
        SIZE = common.rhs$size_ta,
        BTM = common.rhs$log_BTM,
        TRVOL = common.rhs$log_TVOL,
        VIX = common.rhs$med_VIX_pre,
        LEVER = common.rhs$leverage,
        common.rhs[8:159]) %>% as_tibble() # all dummy controls
}

for (i in seq_along(sentiment_suffixes)) {
  sentiment_block <- MZ_RHS %>% select(ends_with(sentiment_suffixes[i]))
  colnames(sentiment_block) <- c("NEG_SENT", "POS_SENT", "ASSERT", "UNCERT", "LITI")
  RHS <- build_rhs(sentiment_block, common.rhs$log_garch)
  assign(paste0("A", i, ".GARCH"), lm(as.matrix(MZ_LHS) ~ as.matrix(RHS)))
  RHS <- build_rhs(sentiment_block, common.rhs$log_gjr)
  assign(paste0("A", i, ".GJR"), lm(as.matrix(MZ_LHS) ~ as.matrix(RHS)))
}
# Table Export ------------------------------------------------------------
# One LaTeX table per volatility model, comparing the eight weighting
# schemes side by side. The `omit` regex drops the report-type, SIC,
# year, month, weekday and report-day dummy controls from the output.
# (Fix: the T/F shorthands were replaced with TRUE/FALSE -- T and F are
# ordinary, reassignable objects in R.)
stargazer(A1.GARCH, A2.GARCH, A3.GARCH, A4.GARCH, A5.GARCH,
          A6.GARCH, A7.GARCH, A8.GARCH,
          out = "/Users/kevin/Dropbox/Master_Thesis/LaTeX_File/Results_Tables/A_MZ_regression_GARCH",
          type = "latex", summary.logical = FALSE,
          omit = ".reptype|sic|repyear|repmth|weekday|repday.",
          align = FALSE, out.header = FALSE, header = FALSE,
          intercept.bottom = TRUE, initial.zero = FALSE, digits = 3,
          digit.separate = 3, digit.separator = ",", font.size = "small",
          label = "tab: A_MZ_regression_garch",
          title = "Regression results: eight different weighting schemes (GARCH)",
          model.numbers = TRUE,
          notes.align = "l",
          notes = "Standard errors in parentheses. TS_FC is the 1-week ahead GARCH(1,1)-forecast.",
          notes.append = TRUE,
          dep.var.caption = "Dependent variable: PFRV",
          dep.var.labels.include = FALSE, omit.stat = c("ser", "f"))
# Same table for the GJR-GARCH specifications.
stargazer(A1.GJR, A2.GJR, A3.GJR, A4.GJR, A5.GJR,
          A6.GJR, A7.GJR, A8.GJR,
          out = "/Users/kevin/Dropbox/Master_Thesis/LaTeX_File/Results_Tables/A_MZ_regression_GJR",
          type = "latex", summary.logical = FALSE,
          omit = ".reptype|sic|repyear|repmth|weekday|repday.",
          align = FALSE, out.header = FALSE, header = FALSE,
          intercept.bottom = TRUE, initial.zero = FALSE, digits = 3,
          digit.separate = 3, digit.separator = ",", font.size = "small",
          label = "tab: A_MZ_regression_gjr",
          title = "Regression results: eight different weighting schemes (GJR-GARCH)",
          model.numbers = TRUE,
          notes.align = "l",
          notes = "Standard errors in parentheses. TS_FC is the 1-week ahead GJR-GARCH(1,1)-forecast.",
          notes.append = TRUE,
          dep.var.caption = "Dependent variable: PFRV",
          dep.var.labels.include = FALSE, omit.stat = c("ser", "f"))
|
6a0d0acfcd52ef2264db17ecb1f221037d999d03 | 2634acca4cc1f402d2b5e55df00b3e02537d7fef | /MetDADevelopmentR/rscript/data_transformation.R | 0b84730880a1fd9d5642894d20961853c4f6f6ff | [] | no_license | slfan2013/MetDA-development | 05d8042fd541b89e20123624ac4add3cef5beb2f | 99a434df6d23fa229020404eac7d06d6d737887a | refs/heads/master | 2020-05-31T03:16:05.555673 | 2019-09-03T18:00:17 | 2019-09-03T18:00:17 | 190,076,747 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,824 | r | data_transformation.R | # result = list(a = defination_of_missing_value)
# http://localhost:5656/ocpu/tmp/x08f655c87c/
pacman::p_load(data.table)
# Fetch the active dataset via the project-level dispatcher:
# e = intensity/expression matrix, f = feature table, p = sample table.
# (`call_fun`, `project_id`, `activate_data_id`, `method` and
#  `treatment_group` come from the calling session.)
data = call_fun(parameter = list(project_id=project_id,activate_data_id=activate_data_id,fun_name="read_data_from_projects"))
e = data$e
f = data$f
p = data$p
# Transform the matrix according to `method`. The log10/log2 branches
# apply log((e + sqrt(e^2 + 4))/2), a shifted transform that stays
# defined for zero and negative entries.
if(method == "log10"){
result = log((e + sqrt(e^2 + 4)) * 0.5, base = 10)
}else if(method == "log2"){
result = log((e + sqrt(e^2 + 4)) * 0.5, base = 2)
}else if(method == "square root"){
result = e^(1/2)
}else if(method == "cubic root"){
result = e^(1/3)
}else{ # boxcox
if(length(treatment_group)==0){
stop("For boxcox method, please select at least one Treatment Group.")
}
# Shift every row so its minimum becomes 1 (Box-Cox needs positive data).
mins = apply(e,1,min,na.rm = TRUE)
result = e-(mins-1)
# Per-row lambda: maximise the Box-Cox profile likelihood of
# x ~ treatment groups; fall back to lambda = 1 (identity) on error.
# NOTE(review): boxcox() is not provided by any package loaded in this
# script (presumably MASS, attached elsewhere in the app) -- confirm.
lambdas = apply(result,1,function(x){
# for(i in 1:nrow(e)){
# x = e[i,]
dta = data.table(x,p[,treatment_group])
bc = tryCatch({boxcox(x ~ ., data = dta, plotit = FALSE)}, error = function(error){
return(list(y =1,x = 1))
})
lambda = bc$x[which.max(bc$y)]
# }
return(lambda)
})
# Apply the Box-Cox transform row by row (log for lambda == 0).
for(i in 1:nrow(result)){
if(lambdas[i] == 0){
result[i,] = log(result[i,] + 0)
}else{
result[i,] = (((result[i,] + 0)^lambdas[i] ) - 1) / lambdas[i]
}
}
# Undo the initial shift.
result = result+(mins-1)
}
# Assemble the transformed matrix into a data.frame for export,
# restoring the row names lost by data.frame() and labelling the
# columns with the sample labels.
e_tran = result
rownames_e_tran = rownames(e_tran)
e_tran = data.frame(e_tran)
colnames(e_tran) = p$label
rownames(e_tran) = rownames_e_tran
result_dataset = e_tran
# Fix: write.csv() always writes a header and warns if col.names is
# passed explicitly, so that argument was dropped.
write.csv(result_dataset, "e.csv", row.names = TRUE)
# Build the HTML result summary via the project dispatcher.
# (Fix: the original passed `type = "result_summary"` twice in this
# list; the accidental duplicate entry was removed.)
report_html = call_fun(parameter = list(
method = method,
treatment_group = treatment_group,
type = "result_summary",
result = result,
doc = NULL,
fun_name = "report_data_transformation"
))$text_html
result = list(results_description = report_html)
|
5bb6047ef9fca34dd3a3b414fef92ec305ed5508 | db54eb647c27f0ba6b328dd3391c9d7c4e5879ba | /R/rows_and_cols.R | 30ad8fc3c8d5568166e56ae8807c9db77d3f27db | [
"MIT"
] | permissive | hoxo-m/pforeach | 0fa383640e9e8e05f19bf0b61934a672dda2093b | 2c44f3bf651a4b2d5d5657bf8be3a94f93769871 | refs/heads/master | 2021-01-24T06:09:12.056995 | 2015-04-24T15:27:44 | 2015-04-24T15:27:44 | 26,576,040 | 39 | 6 | null | null | null | null | UTF-8 | R | false | false | 549 | r | rows_and_cols.R | #'Iterator for data frame by row
#'
#'@param df a data frame
#'
#'@return an \code{iterators} iterator that yields the rows of \code{df} one at a time
#'
#'@export
rows <- function(df) {
  iterators::iter(df, by="row")
}
#'Iterator for data frame by column
#'
#'@param df a data frame
#'
#'@return an \code{iterators} iterator that yields the columns of \code{df} one at a time
#'
#'@export
cols <- function(df) {
  iterators::iter(df, by="col")
}
#'Iterator for row number of a data frame
#'
#'@param df a data frame
#'
#'@return a counting iterator over the row indices 1..nrow(df)
#'
#'@export
irows <- function(df) {
  iterators::icount(nrow(df))
}
#'Iterator for column number of a data frame
#'
#'@param df a data frame
#'
#'@return a counting iterator over the column indices 1..ncol(df)
#'
#'@export
icols <- function(df) {
  iterators::icount(ncol(df))
}
|
a5a541c18ad50fbb28f711babc8775de8aeadfc3 | 7a7375245bc738fae50df9e8a950ee28e0e6ec00 | /R/SA2__Year_LabourForceStatus_Sex_adults.R | 271423ce6ff64ea499f6c182443181cc39084aa4 | [] | no_license | HughParsonage/Census2016.DataPack.TimeSeries | 63e6d35c15c20b881d5b337da2f756a86a0153b5 | 171d9911e405b914987a1ebe4ed5bd5e5422481f | refs/heads/master | 2021-09-02T11:42:27.015587 | 2018-01-02T09:01:39 | 2018-01-02T09:02:17 | 112,477,214 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 208 | r | SA2__Year_LabourForceStatus_Sex_adults.R | #' @title LabourForceStatus, Sex by SA2, Year
#' @description Number of adults by labour force status and sex, for each SA2 and year
#' @format 83,160 observations and 5 variables.
"SA2__Year_LabourForceStatus_Sex_adults"
|
94961f2ea4462ca8bb067580748a0876906c60b4 | da9d04bd4144087341a4e4e92c9754973c12184d | /plot4.R | 7936008d83e93cff0f6e3ffca2bb602d2c704df9 | [] | no_license | ucg8j/ExData_Plotting1 | edc3489bc694802f3073a573803acefb282c69c5 | 2050691093dc1584978d565152e7bb78c82e1f5e | refs/heads/master | 2021-01-16T21:22:21.976156 | 2016-02-07T07:45:49 | 2016-02-07T07:45:49 | 51,236,549 | 0 | 0 | null | 2016-02-07T05:19:51 | 2016-02-07T05:19:50 | null | UTF-8 | R | false | false | 1,403 | r | plot4.R | # read data
# Plot 4 of the ExData course project: a 2x2 panel of household power
# consumption charts for 1-2 Feb 2007, written to plot4.png.
# (Fix: T/F shorthands replaced with TRUE/FALSE -- T and F are
# reassignable objects in R.)

# read data ('?' marks missing values in the raw file)
df <- read.table("household_power_consumption.txt"
                 , sep = ";"
                 , header = TRUE
                 , stringsAsFactors = FALSE
                 , na.strings = "?"
                 , colClasses = c("character", "character", "numeric",
                                  "numeric", "numeric", "numeric",
                                  "numeric", "numeric", "numeric"))
# keep only the two target days
df <- df[df$Date %in% c("1/2/2007", "2/2/2007"), ]
# combine Date and Time into a single timestamp column
df$datetime <- strptime(paste(df$Date, df$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
# open graphics device
png(filename = "plot4.png", width = 480, height = 480)
# 2x2 panel layout, filled row by row
par(mfrow = c(2, 2))
# top-left: global active power over time
plot(df$datetime, df$Global_active_power, type = "l", ylab = "Global Active Power", xlab = "")
# top-right: voltage over time
plot(df$datetime, df$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
# bottom-left: the three sub-metering series plus a legend
plot(df$datetime, df$Sub_metering_1, type = "l"
     , xlab = "", ylab = "Energy sub metering")
lines(df$datetime, df$Sub_metering_2, type = "l", col = 'red')
lines(df$datetime, df$Sub_metering_3, type = "l", col = 'blue')
legend("topright", bty = "n", col = c("black", "red", "blue"),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lwd = 1, cex = 0.40)
# bottom-right: global reactive power over time
plot(df$datetime, df$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
# close graphics device
dev.off()
6b902963d42db65c844d4a98c95b573ba3bfbaee | 709a6f9fff0506d6639a4502655c9f6f83d9b5b0 | /scripts/10_gbm.R | c954cc3b1d17ecfeec239eca51ed0cf1c8b422bd | [] | no_license | loremarchi/hands-on-machine-learning-R-module-2 | d5fa44278f2fb52eac6925d0f0d2b281b9b25d2b | 2050995b266faf14fb762be5fb79857e44851ac3 | refs/heads/main | 2023-07-14T20:47:56.614164 | 2021-08-29T09:24:12 | 2021-08-29T09:24:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,900 | r | 10_gbm.R | ##############################################
# (Stochastic) Gradient Boosting Machines
##############################################
## --------------------------------------------------------------------------------------------------------------------------------------------------
library(tidyverse)
set.seed(54321) # reproducibility
# Toy 1-D regression problem: true mean curve m(x) = 2*sin(x) on
# [0, 2*pi], observed with standard-normal noise (500 points).
dfr <- tibble::tibble(
  x = seq(0, 2 * pi, length.out = 500),
  m = 2 * sin(x),
  y = m + rnorm(length(x), sd = 1))
## --------------------------------------------------------------------------------------------------------------------------------------------------
library(gbm)
# Boosted stumps (interaction.depth = 1) with NO shrinkage (learning
# rate 1): each of the 10 trees takes a full gradient step.
fit <- gbm(formula = y ~ x,
           data = dfr,
           distribution = 'gaussian',
           n.trees = 10,
           interaction.depth = 1,
           shrinkage = 1)
## --------------------------------------------------------------------------------------------------------------------------------------------------
# Predictions from the full 10-tree ensemble on the training grid.
pred <- predict(fit,
                n.trees = fit$n.trees,
                type = 'response')
## --------------------------------------------------------------------------------------------------------------------------------------------------
preds <- do.call(rbind, lapply(0:fit$n.trees,
function(i) dfr %>% mutate(iter = i,
pred = predict(fit, n.trees = i, type = 'response'))))
library(gganimate)
library(transformr)
preds %>% ggplot(aes(x = x)) +
geom_point(aes(y = y), alpha = 0.3) +
geom_line(aes(y = m), colour = 'darkgreen', size = 1.5) +
geom_line(aes(y = pred), colour = 'darkred', size = 1.5) +
transition_states(iter, transition_length = 0.1, state_length = 0.5) + labs(title = "Iteration: {closest_state}") + theme_bw()
## --------------------------------------------------------------------------------------------------------------------------------------------------
# Same stump ensemble, now with shrinkage (learning rate) 0.1: each tree's
# contribution is damped, so 10 iterations visibly underfit.
fit <- gbm(formula = y ~ x,
           data = dfr,
           distribution = 'gaussian',
           n.trees = 10,
           interaction.depth = 1,
           shrinkage = 0.1)
## ---------------------------------------------------------------------------
# Rebuild per-iteration predictions and animate, exactly as above.
preds <- do.call(rbind, lapply(0:fit$n.trees,
                               function(i) dfr %>% mutate(iter = i,
                                                          pred = predict(fit, n.trees = i, type = 'response'))))
preds %>% ggplot(aes(x = x)) +
  geom_point(aes(y = y), alpha = 0.3) +
  geom_line(aes(y = m), colour = 'darkgreen', size = 1.5) +
  geom_line(aes(y = pred), colour = 'darkred', size = 1.5) +
  transition_states(iter, transition_length = 0.1, state_length = 0.5) + labs(title = "Iteration: {closest_state}") + theme_bw()
## --------------------------------------------------------------------------------------------------------------------------------------------------
# Deeper trees (interaction.depth = 3) with shrinkage 0.1: more flexible
# base learners, still damped updates.
fit <- gbm(formula = y ~ x,
           data = dfr,
           distribution = 'gaussian',
           n.trees = 10,
           interaction.depth = 3,
           shrinkage = 0.1)
## ---------------------------------------------------------------------------
# Per-iteration predictions + animation, same pattern as the earlier chunks.
preds <- do.call(rbind, lapply(0:fit$n.trees,
                               function(i) dfr %>% mutate(iter = i,
                                                          pred = predict(fit, n.trees = i, type = 'response'))))
preds %>% ggplot(aes(x = x)) +
  geom_point(aes(y = y), alpha = 0.3) +
  geom_line(aes(y = m), colour = 'darkgreen', size = 1.5) +
  geom_line(aes(y = pred), colour = 'darkred', size = 1.5) +
  transition_states(iter, transition_length = 0.1, state_length = 0.5) + labs(title = "Iteration: {closest_state}") + theme_bw()
## --------------------------------------------------------------------------------------------------------------------------------------------------
# Deeper trees (interaction.depth = 3) with shrinkage 1: flexible learners
# and undamped updates -- the fastest-moving (and noisiest) configuration.
fit <- gbm(formula = y ~ x,
           data = dfr,
           distribution = 'gaussian',
           n.trees = 10,
           interaction.depth = 3,
           shrinkage = 1)
## ---------------------------------------------------------------------------
# Per-iteration predictions + animation, same pattern as the earlier chunks.
preds <- do.call(rbind, lapply(0:fit$n.trees,
                               function(i) dfr %>% mutate(iter = i,
                                                          pred = predict(fit, n.trees = i, type = 'response'))))
preds %>% ggplot(aes(x = x)) +
  geom_point(aes(y = y), alpha = 0.3) +
  geom_line(aes(y = m), colour = 'darkgreen', size = 1.5) +
  geom_line(aes(y = pred), colour = 'darkred', size = 1.5) +
  transition_states(iter, transition_length = 0.1, state_length = 0.5) + labs(title = "Iteration: {closest_state}") + theme_bw()
## --------------------------------------------------------------------------------------------------------------------------------------------------
# Production-style configuration: many trees (300) with a small learning
# rate (0.01) and depth-3 learners; plotted statically below.
fit <- gbm(formula = y ~ x,
           data = dfr,
           distribution = 'gaussian',
           n.trees = 300,
           interaction.depth = 3,
           shrinkage = 0.01)
## --------------------------------------------------------------------------------------------------------------------------------------------------
# Scatter the observations, then overlay the true mean (dark green) and
# the supplied predictions (dark red). `preds` must align row-for-row
# with `dt`; returns a ggplot object.
plot_pred_reg <- function(dt, preds){
  plot_df <- mutate(dt, pred = preds)
  ggplot(plot_df, aes(x = x)) +
    geom_point(aes(y = y), alpha = 0.3) +
    geom_line(aes(y = m), colour = 'darkgreen', size = 1.5) +
    geom_line(aes(y = pred), colour = 'darkred', size = 1.5) +
    theme_bw()
}
# Static view of the final 300-tree fit.
plot_pred_reg(dt = dfr, preds = predict(fit, n.trees = fit$n.trees, type = 'response'))
## Your Turn!
## --------------------------------------------------------------------------------------------------------------------------------------------------
|
ae59702957b8e15cf0c20b62e1bfd1f170064911 | b1a33601063045d53cb876aab6d107fc981f85d4 | /man/n_countries.Rd | 5a01d42aac5d50f64b0a58b441e4e99a183e77e8 | [] | no_license | dedi0003/cwdata | f752fce3fbdb5332a34f7c5c9d837546c7629db2 | b12d34b56728d0412693c49dfef83d1f880b99b9 | refs/heads/master | 2022-12-23T22:52:59.292569 | 2020-10-04T11:53:08 | 2020-10-04T11:53:08 | 301,117,203 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 309 | rd | n_countries.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/count.R
\name{n_countries}
\alias{n_countries}
\title{Count the number of countries}
\usage{
n_countries(data)
}
\arguments{
\item{data}{a dataset containing a \code{country} column}
}
\description{
Count the number of countries
}
|
9c6c868a5c3526b5a8892b516600ce5c059bd65e | 3bc5905d66434160b2974f25612e34492140f550 | /bean_beetles_sim.R | eae1534279a934675db3a9393519b61a962432fb | [] | no_license | rreareyes/BIO153 | f86eb83606379a464d9f397748caa5c6cd64753b | 5fee0239ddc3d9ee6c255d0011a64a145e0f97b8 | refs/heads/main | 2023-03-02T22:35:25.403891 | 2021-02-12T22:09:34 | 2021-02-12T22:09:34 | 338,444,563 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,000 | r | bean_beetles_sim.R | # Simulate Bean Beetle experiment -----------------------------------------
#
# This function creates a dataset for a single student that contains data
# from a specific number of dishes, beetles and with whatever observations
# you want (but it defaults to 60, since that was the original set up of
# one observation per minute).
#
# To simulate the data I used a multinomial distribution that takes a vector
# of probabilities for each of the regions in the petri dish, that defines
# how likely a beetle would go to that region at each time.
#
# As you can see in the function arguments, it defaults to create a dataset
# with two dishes with 3 regions, each with 1 female, that has no preference
# for any region in particular. This vector needs to add up to 1 (the
# function rounds up values if necessary, but it cannot exceed 1)
#
# The number of regions is defined by the length of the vector you create
# for the probabilities associated to each region. For example, to create
# a dish with 4 regions, you need to supply the function with a vector like:
#
# c(0.2, 0.1, 0.2, 0.5)
#
# The function creates a column for each region that contains the number
# of females present in each observation. If you assign more females, each
# row of the dataset will add up to whatever number you have there.
#
# It also needs you to specify the student name, this should be a
# character vector, which will be appended at the end of the spreadsheet
# name, e.g.:
#
# bean_beetles_RamiroRea
#
# Avoid using spaces here, since that can create conflicts sometimes when
# writing the file.
#
# Finally, I included two options to save the data. You could either save
# all as a .csv and then upload it to google sheets and open it with that,
# which will create a copy of the data as a .gsheet file.
#
# Alternatively, if you feel adventurous like me, you can save this data
# straight to your google drive in that format. For this you need to install
# the package "googlesheets4"
#
# Additionally, you will need to authenticate once per session to link R to
# the Sheets API using the command:
#
# gs_auth()
#
# And then uncomment the line:
#
# gs4_create(name = paste("bean_beetles_", student, sep = ""), sheets = experiment_data)
#
# This will save the sheet to your drive root folder, and you can just move
# it later to the directory you use for your class. Hope you find this useful!
#
# Written by Ramiro Eduardo Rea Reyes
# https://github.com/rreareyes
# BIO153
# February 2021
# Examples ----------------------------------------------------------------
#
# 2 dishes with 3 regions, 3 females, high preference for 2nd region
#
# bean_beetle_sim(n_females = 3, observations = 60, n_dishes = 2, probs = c(0.1, 0.8, 0.1), student = "Ramiro")
#
# 3 dishes with 4 regions, 1 female, high preference for 1st region
#
# bean_beetle_sim(n_females = 1, observations = 60, n_dishes = 3, probs = c(0.6, 0.1, 0.2, 0.1), student = "Ramiro")
# -------------------------------------------------------------------------
require(tidyverse)
#require(googlesheets4)
# Simulate one student's bean-beetle dataset and write it to
# "bean_beetles_<student>.csv" in the working directory.
#
# n_females:    female beetles per dish (each row of a dish sums to this).
# observations: number of observation time points per dish.
# n_dishes:     number of petri dishes to simulate.
# probs:        per-region probabilities; their length defines the number
#               of regions (rmultinom() rescales them internally).
# student:      suffix for the output file name; avoid spaces.
#
# Called for its side effect (the CSV file). Idiom fixes vs. the original:
# seq_len() instead of 1:n, FALSE instead of F, paste0(), preallocated lists.
bean_beetle_sim <- function(n_females = 1, observations = 60, n_dishes = 2, probs = c(0.3, 0.3, 0.3), student = ""){
  dish_samples <- vector("list", n_dishes)
  dish_data <- vector("list", n_dishes)
  for (iDish in seq_len(n_dishes)) {
    # One multinomial draw per observation: counts of females per region.
    dish_samples[[iDish]] <- rmultinom(observations, n_females, probs)
    # Transpose to observations-by-regions and rename the default V1, V2, ...
    # columns to "dish<i>_region<j>".
    dish_data[[iDish]] <- as.data.frame(t(dish_samples[[iDish]])) %>%
      rename_with(~ tolower(gsub("V", paste0("dish", iDish, "_region"), .x, fixed = TRUE))) %>%
      mutate(observation = seq_len(observations))
  }
  # Join all dishes on the observation index: one row per time point.
  experiment_data <- reduce(dish_data, full_join, by = "observation") %>%
    relocate(observation)
  # To write straight to Google Drive instead, authenticate with the Sheets
  # API and uncomment (requires the googlesheets4 package):
  #gs4_create(name = paste0("bean_beetles_", student), sheets = experiment_data)
  write.csv(experiment_data, file = paste0("bean_beetles_", student, ".csv"), row.names = FALSE)
}
a86ed3039b7729cd521161a5222203ba6964ce64 | a6d2cb141e97b23c6179e6f83a6abc6c23978894 | /HB_SVM.R | 3cfa639374f8cc02c7c46a7ed486c4dd415ca872 | [] | no_license | JMNData/HiggsBoston | 7867024f3b847291844ba4a1600dde2d6b82e8d4 | 31caa8d016027b493eeb3b9599b76116c09a128c | refs/heads/master | 2020-12-30T11:15:28.484099 | 2014-09-24T02:19:53 | 2014-09-24T02:19:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,304 | r | HB_SVM.R | #install.packages("RODBC")
#INITIALIZE LIBRARIES
# NOTE(review): despite the file name (HB_SVM), the model fit below is a
# random forest; several of these packages (rpart, e1071, gbm, klaR) are
# loaded but not used here, and randomForest is attached twice.
library(RODBC)
library("parallel")
library("randomForest")
library("rpart")
library("e1071")
library("randomForest")
library("gbm")
library("klaR")
#Set Globals
# NOTE(review): hard-coded machine-specific working directory.
setwd("C:\\Users\\Administrator\\Documents\\GitHub\\HiggsBoston")
options(scipen=999)
#GET DATA
# Pull the training table through the "HB" ODBC data source name.
myconn = odbcConnect("HB")
train = sqlQuery(myconn, "select * from training")
close(myconn)
# Drop the EventId column; keep the label (column 32) plus the 31 numeric
# features. NOTE(review): these positional indices assume a fixed schema.
train.input = subset(train, select=-c(EventId))
train.input = cbind (train.input[32:32], sapply(train.input[1:31],as.numeric))
model = randomForest(Label~., data=train.input)
# Pull and prepare the test table the same way (no label column here).
myconn = odbcConnect("HB")
test = sqlQuery(myconn, "select * from test")
close(myconn)
test.input = subset(test, select=-c(EventId))
test.input = sapply(test.input[1:30], as.numeric)
# Attach predicted class, class probabilities, and a rank derived from the
# probability column (column 33 of the combined frame).
Predicted.test = cbind(test, predicted = predict(model, test.input, interval="predict", type="response"))
Predicted.test.prob = cbind(Predicted.test, predictedp = predict(model, test.input, interval="predict", type="prob"))
Predicted.test.prob = cbind(Predicted.test.prob, rank = sapply(rank(Predicted.test.prob[33:33]), as.numeric))
# Export the submission columns; format() keeps ranks out of scientific notation.
out = c("EventId", "rank", "predictedp.class")
Predicted.test.prob$rank = format(Predicted.test.prob$rank, scientific = FALSE)
write.csv(Predicted.test.prob[out], "data\\resultsRF.csv", row.names = FALSE)
|
d8b957c5cb67f68643562313d3d5f91004dd615c | 7e91349fa2202bc3920e8ff728f8bbfbd871b3d8 | /6-PathwayHeterogeneity/runGSEA_preRank.R | b1dc17043d741cc3202227d66dceb289ac2ae531 | [
"MIT"
] | permissive | zhengtaoxiao/Single-Cell-Metabolic-Landscape | 812dc12fd284b575145c855c72196bea5d008222 | a89f4215e66e48c2540e09a8679529add39f7e38 | refs/heads/master | 2021-11-25T11:26:22.555380 | 2021-11-10T01:26:45 | 2021-11-10T01:26:45 | 160,257,851 | 24 | 7 | MIT | 2021-11-10T01:23:59 | 2018-12-03T21:43:21 | R | UTF-8 | R | false | false | 836 | r | runGSEA_preRank.R | runGSEA_preRank<-function(preRank.matrix,gmt.file,outname){
#descending numerical order
#dump preRank into a tab-delimited txt file
write.table(preRank.matrix,
file='prerank.rnk',
quote=F,
sep='\t',
col.names=F,
row.names=T)
#call java gsea version
command <- paste('java -Xmx512m -cp ../gsea-3.0.jar xtools.gsea.GseaPreranked -gmx ', gmt.file, ' -norm meandiv -nperm 1000 -rnk prerank.rnk ',
' -scoring_scheme weighted -make_sets true -rnd_seed 123456 -set_max 500 -set_min 15 -zip_report false ',
' -out preRankResults -create_svgs true -gui false -rpt_label ',outname, sep='')
if(get_os() == "win"){
system(command,show.output.on.console=F)
}else{
system(command)
}
unlink(c('prerank.txt'))
}
|
b08e3e0fa83ed20239a0e4ecd129b3690e3c891a | 212ac6787936c05182408d738ed82f1501ae35e7 | /HapMap.R | d88e237e95a7b73549f0d71df48c12c587b40e28 | [] | no_license | catmarper/RaukRProject | 640257994a1aa9d987cf2f8df84c093bf523c2d8 | 2c454df5888dc73082ccf65f177987517aa5cb6e | refs/heads/master | 2020-06-05T21:13:14.026774 | 2019-06-19T19:41:08 | 2019-06-19T19:41:08 | 192,546,433 | 0 | 8 | null | 2019-06-19T19:41:09 | 2019-06-18T13:32:49 | HTML | UTF-8 | R | false | false | 6,815 | r | HapMap.R | #From Marcin: HapMap Autoencoder
#autoencoder as a replacement/complement to MDS/PCA used for visualising population structure in genetics
#head(summary(data_raw@gtdata))#summary of the genetic data
#Getting rid of chromosome 39 (the X chromosome)
#data_autosomal <- data_raw[,data_raw@gtdata@chromosome != "39"]
# Compute genomic kinship-based distances
# Compute genomic kinship-based distances, takes long time
# Genomic kinship between individuals (GenABEL-style ibs() with
# allele-frequency weights). This is the slow step, so cache it to disk.
gkin <- ibs(data_raw, weight = 'freq')
save(gkin, file="gkin.rdat") # cache so this step can be skipped on reruns
head(gkin)
# Turn kinship into a distance object: 0.5 - kinship.
dm <- as.dist(.5 - gkin) # Normalize it
# PCA on the kinship-based distances.
pca <- prcomp(dm) #Principal components analysis
library(devtools)
#install_github("vqv/ggbiplot") #to get the ggbiplot
library(ggbiplot)
# Palette of nine colors, reused for the breed groups in all plots below.
color_9 <- c("black", "dodgerblue4", "bisque4", "darkgreen","yellow3", "darkgoldenrod3", "darkorchid4", "darkred", "grey")
# PCA biplot, points colored by breed; ellipses and variable axes disabled.
g <- ggbiplot(pca, obs.scale = 1, var.scale = 1,
              groups = data_raw@phdata$Breed, ellipse = F, #using breed as a group
              circle = TRUE, var.axes = F) +
  ggtitle("PCA")+
  scale_color_manual(values = color_9)
#g <- g + scale_color_discrete(name = '')
#g <- g + theme()
#PCA without X chromosome
#ga <- ggbiplot(pca, obs.scale = 1, var.scale = 1,
# groups = data_autosomal@phdata$Breed, ellipse = F, #using breed as a group
# circle = TRUE, var.axes = F) +
# ggtitle("PCA autosomal") +
# scale_color_manual(values = color_9)
#ga <- ga + scale_color_discrete(name = '')
#ga <- ga + theme()
#MDS
# MDS
# Classical MDS (2-D) on the same distance matrix.
# NOTE(review): this `ibs` data frame shadows the ibs() function used
# above -- consider a different name.
ibs <- as.data.frame(cmdscale(dm))
ibs <- cbind(ibs, pop = data_raw@phdata$Breed) # same grouping as the PCA
z <-ggplot(ibs, mapping = aes(x=V1, y=V2, col=pop)) +
  geom_point() +
  ggtitle("MDS") +
  scale_color_manual(values = color_9) +
  theme(legend.position = 'none') # legend suppressed; a shared one is added later
#MDS from autosomal
#ibsa <- as.data.frame(cmdscale(dm))
#ibsa <- cbind(ibs, pop = data_autosomal@phdata$Breed) #same as group at PCA
#za <-ggplot(ibsa, mapping = aes(x=V1, y=V2, col=pop)) +
# geom_point() +
# ggtitle("MDS autosomal") +
# scale_color_manual(values = color_9)+
# theme(legend.position = 'none') #removing legend
library(gridExtra)
library(grid)
library(ggplot2)
library(lattice)
library(ggpubr)
#arranging the two grids side by side with one legend
ggarrange(g,z, ncol=2, nrow=1, common.legend = TRUE, legend="bottom")
#Autoencoder
#we define model parameters: loss function set to binary cross-entropy and hidden-layer activation set to ReLU.
# Autoencoder hyperparameters: binary cross-entropy loss and ReLU
# activations for the hidden layers.
loss_fn <- 'binary_crossentropy'
act <- 'relu'
# Encode genotypes: keep only individuals/SNPs passing the call-rate filter,
# convert to numeric, then divide by 2 so (per the header notes)
# homozygote aa -> 0, heterozygote -> 0.5, homozygote AA -> 1.
qc1 <- check.marker(data = data_raw, callrate = 1) #to remove NAs
geno_matrix <- as.double(data_raw[qc1$idok, qc1$snpok]) #to remove NAs
geno_tensor <- geno_matrix/2 #keras::to_categorical(geno_matrix)
# Random 80/20 split of the rows into training and validation sets.
n_rows <- dim(geno_tensor)[1]
train_idx <- sample(1:n_rows, size = 0.8 * n_rows, replace = F)
train_data <- geno_tensor[train_idx, ]
valid_data <- geno_tensor[-train_idx, ]
# Autoencoder parameters, repeated: these two assignments duplicate the
# identical ones just above (harmless, but one copy could be removed).
loss_fn <- 'binary_crossentropy'
act <- 'relu'
library(tensorflow)
library(keras)
#Did not work for me I had to run these commands
#Newest version of keras has a bug. That is not fixed. Use older version
#devtools::install_github("rstudio/tensorflow")
#devtools::install_github("rstudio/keras")"
#tensorflow::install_tensorflow()
#tensorflow::tf_config()
# Encoder: input -> 1500 -> 500 -> 25 -> 2, with batch normalization and
# dropout for regularization; the 2-unit bottleneck is the embedding that
# gets visualized at the end of the script.
input_layer <- layer_input(shape = dim(train_data)[2])
encoder <-
  input_layer %>%
  layer_dense(units = 1500, activation = act) %>%
  layer_batch_normalization() %>%
  layer_dropout(rate = 0.2) %>%
  layer_dense(units = 500, activation = act) %>%
  layer_dropout(rate = 0.1) %>%
  layer_dense(units = 25, activation = act) %>%
  layer_dense(units = 2) # bottleneck
# Decoder mirrors the encoder back up to the input width; the sigmoid
# output matches the 0/0.5/1 scaling of the genotypes.
decoder <-
  encoder %>%
  layer_dense(units = 25, activation = act) %>%
  layer_dropout(rate = 0.2) %>%
  layer_dense(units = 500, activation = act) %>%
  layer_dropout(rate = 0.1) %>%
  layer_dense(units = 1500, activation = act) %>%
  layer_dense(units = dim(train_data)[2], activation = "sigmoid")
# Full autoencoder: input reconstructed through the bottleneck; compiled
# with the loss/optimizer chosen above.
autoencoder_model <- keras_model(inputs = input_layer, outputs = decoder)
autoencoder_model %>% compile(
  loss = loss_fn,
  optimizer = 'adam',
  metrics = c('accuracy')
)
summary(autoencoder_model)
#Training phase
#Now the model is trained, loss and accuracy are evaluated on both the training and the external validation set.
#This run takes some time
# Train to reconstruct the input (x == y); loss/accuracy are tracked on
# both the training set and the held-out 20% validation split.
history <- autoencoder_model %>% fit(
  x = train_data,
  y = train_data,
  epochs = 120,
  shuffle = T,
  batch_size = 256,
  validation_data = list(valid_data, valid_data)
)
plot(history)
# Reconstruction error on a 100x100 corner of the training data:
# cell color encodes |input - reconstruction|.
reconstructed_points <-
  keras::predict_on_batch(autoencoder_model, x = train_data)
delta <- abs(train_data - reconstructed_points)
heatmap(delta[1:100, 1:100], Rowv = NA, Colv = NA,
        col=heat.colors(5), scale = 'none')
# NOTE(original author): works up to this point; the remainder hit a
# package problem at the time of writing.
# Build the standalone encoder: save the trained autoencoder weights,
# rebuild the encoder half, and load the weights into it.
autoencoder_weights <- autoencoder_model %>% keras::get_weights()
keras::save_model_weights_hdf5(object = autoencoder_model,
                               filepath = './autoencoder_weights.hdf5',
                               overwrite = TRUE)
encoder_model <- keras_model(inputs = input_layer, outputs = encoder)
# NOTE(review): this errored with "trying to load a weight file containing
# 9 layers into a model with 5 layers". Per the keras API, skip_mismatch
# only applies when by_name = TRUE, so by_name = F here likely needs to
# change (with named layers) for the partial load to work -- verify.
encoder_model %>% keras::load_model_weights_hdf5(filepath = "./autoencoder_weights.hdf5",
                               skip_mismatch = TRUE,
                               by_name = F)
encoder_model %>% compile(
  loss = loss_fn,
  optimizer = 'adam',
  metrics = c('accuracy')
)
# Project every genotype row into the 2-D bottleneck space.
embeded_points <-
  encoder_model %>%
  keras::predict_on_batch(x = geno_tensor)
#Final results
# Combine the 2-D autoencoder embedding with the MDS coordinates into one
# long data frame so the two views can be faceted side by side.
embedded <- data.frame(embeded_points[,1:2],
                       pop = data_raw@phdata$Breed, # fix: slot is 'Breed' (capitalized), as used earlier in this script
                       type='emb')
mds <- cbind(ibs, type='mds')
colnames(mds) <- c('x', 'y', 'pop', 'type')
colnames(embedded) <- c('x', 'y', 'pop', 'type')
dat <- rbind(embedded, mds)
# Facet by method; free scales because the two coordinate systems differ.
dat %>% ggplot(mapping = aes(x=x, y=y, col=pop)) +
  geom_point() +
  facet_wrap(~type, scales = "free")
|
5ece6787f8e207d3e4689ad34f8e1bd0ebe14dd5 | 2e54be2072252c1250e4c80248a775b8d91d2798 | /man/indexPat.Rd | 96d4d8dcce4e6fcf79652269e6afda995334d2ff | [
"MIT"
] | permissive | SICSresearch/IRTpp | 397af58ac686701565f1718ea32b68ea580b2d35 | 529ecc0ab14898cbf1862d96addb78689f08997a | refs/heads/master | 2020-12-29T00:59:44.650919 | 2016-06-20T15:18:34 | 2016-06-20T15:18:34 | 30,204,560 | 3 | 3 | null | 2015-11-05T13:10:34 | 2015-02-02T19:38:11 | R | UTF-8 | R | false | true | 313 | rd | indexPat.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{indexPat}
\alias{indexPat}
\title{Index pattern}
\usage{
indexPat(data, pats)
}
\arguments{
\item{data}{The pattern matrix}
\item{pats}{The indexed pattern}
}
\description{
The index of the pattern
}
\keyword{internal}
|
539078c3c24aae6495dded3eefe22a95e248a3ee | 3f88e08aaa9bce00f01ccfa31ee3d777deddc1e6 | /inst/shiny-examples/analytic/labels.R | 468d3e6ace27bbc7373d573baaf5d8c1773bf756 | [] | no_license | Kenkleinman/clusterPower | 8a22730597762066f211208ff57b8f42f1dffaaf | cfa3a00a14c381a824eb399c05596b252003795a | refs/heads/master | 2023-04-06T14:42:26.428117 | 2021-01-30T01:03:52 | 2021-01-30T01:03:52 | 9,607,628 | 3 | 6 | null | 2023-04-04T23:27:25 | 2013-04-22T19:57:35 | R | UTF-8 | R | false | false | 7,550 | r | labels.R | # parameters common to at least two outcomes
alphatext <- HTML('α (alpha)')
alphatooltip <- 'Type I error rate. Should be between 0 and 1, preferably close to 0 (e.g. 0.05).'
alphavalidmsg <- 'Type I error rate should be between 0 and 1.'
powertext <- 'Power (power)'
powertooltip <- 'Power of the test. Should be between 0 and 1, preferably close to 1 (e.g. 0.80 or 0.90).'
powervalidmsg <- 'Power should be between 0 and 1.'
nclusterstext <- 'Clusters per arm (nclusters)'
nclusterstooltip <- 'The number of clusters per arm.'
nsubjectstext <- 'Subjects per cluster (nsubjects)'
nsubjectstooltip <- 'The mean sample size per cluster.'
p1text <- 'Proportion 1 (p1)'
p1tooltip <- 'The expected proportion in the treatment group.'
p2text <- 'Proportion 2 (p2)'
p2tooltip <- 'The proportion in the control group.'
p1inctext <- 'p1 > p2'
p1inctooltip <- 'Select to indicate that the treatment group proportion is greater than the control group proportion. This option is needed only when the target quantity is either "p1" or "p2". If both "p1" and "p2" are given this option has no effect.'
icctext <- 'ICC (icc)'
icctooltip <- 'Intracluster correlation coefficient. Two (2) of ICC, vart, sigma_b,and sigma_b_sq must be supplied.'
cvtext <- 'Cluster size CV (cv)'
cvtooltip <- 'Coefficient of variation of the cluster sizes. When this equals 0, all clusters have the same size.'
# -----------------------------------------------------------------------------
# specific to 2mean:
varttext <- 'Total variance (vart)'
varttooltip <- 'The total variation of the outcome. Two (2) of ICC, vart, sigma_b,and sigma_b_sq must be supplied.'
sigma_sqtext <- 'Within-cluster variance (sigma_sq)'
sigma_sqtooltip <- 'Within-cluster variation. Two (2) of ICC, vart, sigma_b,and sigma_b_sq must be supplied.'
sigma_b_sqtext <- 'Between-cluster variance (sigma_b_sq)'
sigma_b_sqtooltip <- 'Between-cluster variation. Two (2) of ICC, vart, sigma_b,and sigma_b_sq must be supplied.'
dtext <- 'Difference (d)'
dtooltip <- 'Expected difference in condition means.'
methodtext <- 'Unequal Cluster Size Adjustment'
methodtooltip <- 'Method for calculating the variance inflation and design effect due to unequal cluster sizes. When CV = 0, "method" has no effect.'
# -----------------------------------------------------------------------------
# specific to 2meanD:
dDtext <- 'Difference in difference (d)'
dDtooltip <- 'Expected net difference in outcome statistics (means, proportions, etc.).'
rho_ctext <- 'Within cluster correlation (rho_c)'
rho_ctooltip <- 'The correlation between baseline and post-test outcomes at the cluster level. Used in both cohort and cross-sectional designs. A value of "0" is a conservative estimate.'
rho_stext <- 'Within subject correlation (rho_s)'
rho_stooltip <- 'The correlation between baseline and post-test outcomes at the subject level. For a purely cross-sectional design, this value should be 0.'
# -----------------------------------------------------------------------------
# specific to SW:
rho_cSWtext <- 'Within cluster correlation (rho_c)'
rho_cSWtooltip <- 'The correlation over time between outcomes at the cluster level. Used in both cohort and cross-sectional designs. A value of "0" is a conservative estimate.'
rho_sSWtext <- 'Within subject correlation (rho_s)'
rho_sSWtooltip <- 'The correlation overtime between outcomes at the subject level. For a purely cross-sectional design, this value should be 0.'
ntimestext <- "Number of time points (ntimes)"
ntimestooltip <- "Number of time points (not counting baseline)"
nclustersSWtext <- 'Clusters per arm (nclusters)'
nclustersSWtooltip <- 'The number of clusters shifting to treatment at each time point.'
# -----------------------------------------------------------------------------
# specific to 2meanDltf:
ltf_0text <- 'Control LTF rate (ltf_0)'
ltf_0tooltip <- 'Proportion of controls lost to follow-up.'
ltf_1text <- 'Treatment LTF rate (ltf_1)'
ltf_1tooltip <- 'Proportion of treatment lost to follow-up.'
replacetext <- 'Replace? (replace)'
replacetooltip <- 'Replace individuals who are lost to follow-up.'
# -----------------------------------------------------------------------------
# specific to 2meanM:
rho_mtext <- 'Matching correlation (rho_m)'
rho_mtooltip <- 'The correlation in outcome used between matched clusters.'
# -----------------------------------------------------------------------------
# specific to nmean:
narmstext <- 'Number of arms (narms)'
narmstooltip <- 'Total number of arms (conditions) in the trial. Must be greater than 2'
varatext <- 'Between-arm variance (vara)'
varatooltip <- 'Variance between the arm means.'
varctext <- 'Between-cluster variance (varc)'
varctooltip <- 'Variance between the cluster means.'
varetext <- 'Within-cluster variance (vare)'
varetooltip <- 'Variance within clusters, i.e. residual error.'
# -----------------------------------------------------------------------------
# specific to 2prop:
pooledtext <- 'Pooled'
pooledtooltip <- 'Select to indicate if pooled variance is desired.'
ttesttext <- 't-test'
ttesttooltip <- 'Select to use a t-distribution instead of a z-distribution.'
# -----------------------------------------------------------------------------
# specific to 2propD:
ptext <- "Expected mean proportion (p)"
ptooltip <- "The expected mean proportion at the post-test, averaged across treatment and control arms."
# -----------------------------------------------------------------------------
# specific to 2propM:
cvmtext <- "Within-pair Outcome CV (cvm)"
cvmtooltip <- "The coefficient of variation in the outcome within matched clusters."
# specific to 2rate:
r1text <- 'Rate 1 (r1)'
r1tooltip <- 'The expected rate in the treatment group.'
r2text <- 'Rate 2 (r2)'
r2tooltip <- 'The expected rate in the control group.'
r1inctext <- 'r1 > r2'
r1inctooltip <- 'Select to indicate that the treatment group rate is greater than the control group rate. This option is needed only when the target quantity is either "r1" or "r2". If both "r1" and "r2" are given this option has no effect.'
pytext <- 'Person-years per cluster (py)'
pytooltip <- 'Person years per cluster.'
cvbtext <- 'Between-cluster CV (cvb)'
cvbtooltip <- 'The coefficient of variation of the person years per cluster. Analogous to ICC for two continuous outcomes.'
# static/button text
defaulttext <- 'Defaults'
clearalltext <- 'Clear All'
calctext <- 'Calculate'
dltext <- 'Download'
powercheck <- 'Power must be between 0 and 1.'
alphacheck <- 'Type I error rate, α, must be between 0 and 1.'
credittext <- 'App created by Jon Moyer and Ken Kleinman; support from NIGMS grant R01GM121370.'
# graphs
ylab <- 'Y'
xlab <- 'X'
grouplab <- 'Group'
colorlab <- 'Color by Group'
rowlab <- 'Row'
collab <- 'Column'
heightlab <- 'Plot Height'
widthlab <- 'Plot Width'
psizelab <- 'Point Size'
lsizelab <- 'Line Size'
# labels for the various quantities
graph_labels <- c(
`alpha` = "Level of Significance",
`power` = "Power",
`d` = "Difference",
`nclusters` = "Mean Clusters Per Arm",
`nsubjects` = "Mean Subjects Per Cluster",
`icc` = "ICC",
`vart` = "Total Variance",
`cv` = "Cluster Size CV",
`rho_c` = "Within-Cluster Correlation",
`rho_s` = "Within-Subject Correlation",
`rho_m` = "Matching Correlation",
`p1` = "Treatment Proportion",
`p2` = "Control Propotion",
`cvm` = "Within-Pair Outcome CV",
`r1` = "Treatment Rate",
`r2` = "Control Rate",
`py` = "Person-Years Per Cluster",
`cvb` = "Between-Cluster CV")
|
548efb1d52c5e709a8a89fbea4428e87555fd28a | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/gcKrig/R/predGHK.R | 9e4c6b56336b5f7e14dcb4310d6920a6ccd44ae6 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 22,763 | r | predGHK.R |
#######################################################################################################################
#######################################################################################################################
#### Internal functions: only called by other functions inside this package
#######################################################################################################################
#######################################################################################################################
#### param: Last one (or two) elements are: Range and Nugget (if applicable)
#### The first (1 + number of covariates) parameters are: regression parameters
#### Before Range parameter: overdispersion parameter (if applicable)
#### y: counts.
#### x: covariates
#### kappa: smoothness parameter in matern marginal of correlations
# Thin wrapper around the compiled GHK simulator ('ghkgcmr' in gcKrig).
# R:    copula correlation matrix of the observations.
# pdf:  marginal probabilities evaluated at the observed counts.
# cdf:  marginal CDF values evaluated at the observed counts.
# nrep: number of GHK simulation replicates.
# Returns the simulated log-likelihood (see its use in likGHK below).
ghkgcmr_R <- function(R, pdf, cdf, nrep){
  .Call('ghkgcmr', PACKAGE = 'gcKrig', R, pdf, cdf, nrep)
}
# Variant of ghkgcmr_R backed by the compiled routine 'ghkgcmr2'; selected
# when reorder = TRUE in likGHK. Same arguments and return value.
ghkgcmr2_R <- function(R, pdf, cdf, nrep){
  .Call('ghkgcmr2', PACKAGE = 'gcKrig', R, pdf, cdf, nrep)
}
# Negative GHK simulated log-likelihood of a Gaussian copula model for
# spatial counts. Internal: minimized by the package's optimizers.
#
# pars:      full parameter vector, ordered as (regression coefficients,
#            overdispersion parameter if marginal$nod == 1, correlation
#            parameters with the nugget last if corr$nug == 1).
# y, x:      observed counts and design matrix (x includes the intercept).
# locs:      coordinates of the sampled locations.
# marginal:  marginal specification providing pdf(), cdf() and nod.
# corr:      correlation specification providing corr(), npar.cor and nug.
# effort:    sampling effort, passed through to the marginal.
# longlat:   TRUE for great-circle distances (requires the sp package).
# distscale: multiplier applied to all distances.
# nrep:      number of GHK simulation replicates.
# reorder:   use the variable-reordering GHK variant (ghkgcmr2_R).
# seed:      RNG seed, fixed so the simulated likelihood is a deterministic
#            function of pars during optimization.
#
# Returns the negative simulated log-likelihood, capped at +/- 1e6.
# (Fixes vs. original: scalar `&`/`|` -> `&&`/`||`, scalar ifelse() -> min/max,
# `=` -> `<-` for assignments.)
likGHK <- function(
  pars, y, x = NULL, locs, marginal, corr, effort, longlat = FALSE, distscale = 1,
  nrep = 1000, reorder = FALSE, seed = 12345
){
  # Distance matrix: Euclidean, or great-circle when longlat = TRUE.
  if (!longlat) {
    matD <- as.matrix(dist(locs, method = "euclidean", diag = TRUE, upper = TRUE))*distscale
  } else {
    if (requireNamespace("sp", quietly = TRUE)) {
      matD <- sp::spDists(x = as.matrix(locs), longlat = TRUE)*distscale
    } else {
      stop("Please install {sp} first!")
    }
  }
  npars <- length(pars)
  nparcorr <- corr$npar.cor
  # Project out-of-support proposals back onto the boundary:
  # overdispersion >= 0, range >= 0, nugget in [0, 1].
  if (marginal$nod == 1 && pars[ncol(x) + 1] < 0) {
    pars[ncol(x) + 1] <- 0
  }
  if (corr$nug == 0 && pars[npars] < 0) {
    pars[npars] <- 0
  } else if (corr$nug == 1) {
    pars[npars] <- min(max(pars[npars], 0), 1)
    pars[npars - 1] <- max(pars[npars - 1], 0)
  }
  # Copula correlation matrix implied by the trailing correlation parameters.
  R <- corr$corr(pars[(npars - nparcorr + 1):npars], D = matD)
  pdf <- marginal$pdf(y = y, x = x, pars = pars, effort = effort)
  cdf <- marginal$cdf(y = y, x = x, pars = pars, effort = effort)
  set.seed(seed)
  if (!reorder) {
    loglik <- ghkgcmr_R(R = R, pdf = pdf, cdf = cdf, nrep = nrep)
  } else {
    loglik <- ghkgcmr2_R(R = R, pdf = pdf, cdf = cdf, nrep = nrep)
  }
  # Cap non-finite simulated values so the optimizer sees finite objectives.
  if (is.nan(loglik) || loglik == -Inf) loglik <- -1e6
  if (loglik == Inf) loglik <- 1e6
  return(-loglik)
}
# Profile-likelihood objective for confidence-limit searches.
#
# theta:     value(s) being optimized over.
# par.index: position of the profiled parameter in the full vector.
# fitted:    fitted model object (args, log.lik).
# fixvalue:  value(s) held fixed; combined with theta to rebuild the full
#            parameter vector.
# single:    TRUE -> theta is the single profiled parameter and the return
#            value is the squared gap between the profile log-likelihood
#            and the chi-square confidence cutoff; FALSE -> theta holds the
#            nuisance parameters and the negative log-likelihood is returned.
# alpha:     significance level defining the cutoff (used when single = TRUE).
# nrep, seed: GHK settings forwarded to likGHK().
profilelikGHK <- function(
  theta, par.index, fitted, fixvalue = NULL, single = TRUE, alpha, nrep = 1000, seed = 12345
){
  # Reassemble the full parameter vector: the optimized piece is spliced
  # in at position par.index relative to the fixed piece.
  full.pars <- if (single) {
    append(fixvalue, theta, after = par.index - 1)
  } else {
    append(theta, fixvalue, after = par.index - 1)
  }
  a <- fitted$args
  negll <- likGHK(pars = full.pars, y = a$y, x = a$x, locs = a$locs,
                  marginal = a$marginal, corr = a$corr, effort = a$effort,
                  longlat = a$longlat, distscale = a$distscale,
                  nrep = nrep, reorder = FALSE, seed = seed)
  if (single) {
    # Squared distance to the (1 - alpha) profile-likelihood cutoff.
    target <- fitted$log.lik - qchisq(1 - alpha, 1) / 2
    (-negll - target)^2
  } else {
    negll
  }
}
# Optimize the profile-likelihood objective profilelikGHK() under box
# constraints. Internal helper for profile confidence intervals.
#
# fitted:     fitted model object carrying the optimizer bounds
#             (optlb/optub) and the maximized log-likelihood.
# par.index:  index of the profiled parameter.
# fixvalue:   value(s) held fixed during the search.
# single:     TRUE to search over the profiled parameter itself;
#             FALSE to optimize all remaining (nuisance) parameters.
# start:      starting value(s) for optim().
# alpha:      significance level defining the confidence-limit target.
# nrep, seed: GHK settings forwarded to the likelihood.
#
# Returns the optimizer's parameter estimate(s).
mleProfilelikGHK <- function(
  fitted, par.index, fixvalue = NULL, single = TRUE, start, alpha, nrep = 1000, seed = 12345
){
  # Bounds: just the profiled coordinate when single, otherwise every
  # coordinate except the profiled one.
  if (single) {
    lb <- fitted$optlb[par.index]
    ub <- fitted$optub[par.index]
  } else {
    lb <- fitted$optlb[-par.index]
    ub <- fitted$optub[-par.index]
  }
  opt <- optim(start, profilelikGHK, par.index = par.index, fitted = fitted,
               fixvalue = fixvalue, single = single, alpha = alpha,
               nrep = nrep, seed = seed, method = 'L-BFGS-B',
               lower = lb, upper = ub)
  opt$par
}
#######################################################################################################################
#######################################################################################################################
#### Inner function only used to be called by another functions inside package
#######################################################################################################################
#######################################################################################################################
#### Maximum likelihood estimation for the Gaussian copula geostatistical model,
#### with the likelihood approximated by the GHK simulator (via likGHK).
####
#### y          observed counts.
#### x          covariates (NULL for intercept-only); an intercept column is added here.
#### locs       sampling locations, one row per observation.
#### marginal   marginal-distribution object providing $start() and $nod.
#### corr       correlation-model object providing $start() and $nug.
#### effort     sampling effort; a unit scalar is recycled to all observations.
#### longlat    if TRUE, great-circle distances are used (requires {sp}).
#### distscale  multiplier applied to all distances.
#### corrpar0   optional user-supplied starting values for the correlation parameters.
#### ghkoptions list(nrep, reorder, seed) controlling the GHK simulator.
####
#### Returns a list with the MLE, information criteria, the distance matrix and a
#### snapshot of the call arguments (reused by the profile-likelihood functions).
mleGHK <- function(
  y, x = NULL, locs, marginal, corr, effort = 1, longlat = FALSE, distscale = 1, corrpar0 = NULL,
  ghkoptions = list(nrep = c(100,1000), reorder = FALSE, seed = 12345)
){
  ## Fill in defaults for any ghkoptions component the caller omitted
  ## (consistent with predGHK.sf, which guards the same way).
  if(is.null(ghkoptions[["nrep"]])) ghkoptions$nrep <- c(100,1000)
  if(is.null(ghkoptions[["reorder"]])) ghkoptions$reorder <- FALSE
  if(is.null(ghkoptions[["seed"]])) ghkoptions$seed <- 12345
  ## Pairwise distance matrix between sampling locations.
  if(longlat == FALSE){
    D <- as.matrix(dist(locs, method = "euclidean", diag = TRUE, upper = TRUE))*distscale
  }else if(longlat == TRUE){
    if (requireNamespace("sp", quietly = TRUE)) {
      D <- sp::spDists(x = as.matrix(locs), longlat = TRUE)*distscale
    }else{
      stop("Please install {sp} first!")
    }
  }
  N <- length(y)
  ## Recycle a unit scalar effort to all N observations (&& for the scalar test).
  if(length(effort) == 1 && effort[1] == 1) effort <- rep(1,N)
  ## Prepend an intercept column; x may be NULL (intercept-only model), in which
  ## case cbind(rep(1,N), NULL) yields just the intercept column.
  if(!is.null(x)){
    x <- as.data.frame(x)
    x <- cbind(rep(1,N), x)
  }else{
    x <- cbind(rep(1,N), x)
  }
  ## Starting values: marginal parameters from the marginal's own rule ...
  marg0 <- marginal$start(y = y, x = x, effort = effort)
  n.nugget0 <- corr$nug    # 1 if the correlation model includes a nugget, else 0
  n.reg0 <- ncol(x)        # number of regression coefficients (incl. intercept)
  n.od0 <- marginal$nod    # 1 if the marginal has an overdispersion parameter, else 0
  ## ... and correlation parameters either user-supplied or data-driven.
  if(is.null(corrpar0)){
    corpar0 <- corr$start(D)
  }else{
    corpar0 <- corrpar0
    names(corpar0) <- names(corr$start(D))
  }
  est <- c(marg0, corpar0)
  ## Box constraints for L-BFGS-B: regression coefficients unconstrained,
  ## overdispersion and range parameters >= 0, nugget restricted to [0, 1].
  optlb <- c(rep(-Inf,n.reg0), rep(0, n.od0), 0, rep(0, n.nugget0))
  optub <- c(rep(Inf,n.reg0), rep(Inf, n.od0), Inf, rep(1, n.nugget0))
  ## Optimize repeatedly with an increasing number of GHK replicates; each round
  ## warm-starts from the previous estimate, so cheap runs refine the start for
  ## the expensive final run.
  for(j in seq_along(ghkoptions$nrep)){
    fit <- optim(par = est, fn = likGHK, y = y, x = x, locs = locs, marginal = marginal, corr = corr,
                 effort = effort, longlat = longlat, distscale = distscale, nrep = ghkoptions$nrep[j],
                 reorder = ghkoptions$reorder, seed = ghkoptions$seed, method = "L-BFGS-B",
                 lower = optlb, upper = optub)
    est <- fit$par
  }
  kmarg <- length(marg0)
  k <- length(est)
  ## BUG FIX: the original called warnings(), which only *prints previously
  ## collected* warnings and signals nothing, so convergence failures were
  ## silent. warning() actually raises the condition to the caller.
  if (is.null(fit$convergence) || fit$convergence != 0)
    warning("Maximum likelihood estimation failed. Algorithm does not converge or MLEs do not exist.")
  ## likGHK returns the *negative* log-likelihood, hence the sign flips below.
  result.list <- list(MLE = est,
                      x = x,
                      nug = n.nugget0,
                      nreg = n.reg0,
                      log.lik = -fit$value,
                      AIC = 2*k+2*fit$value,
                      AICc = 2*k+2*fit$value + 2*k*(k+1)/(N-k-1),
                      BIC = 2*fit$value + k*log(N),
                      kmarg = kmarg,
                      par.df = k,
                      N = N,
                      D = D,
                      ## Snapshot of all formal arguments (post-processing values),
                      ## reused by profilelikGHK and predGHK.
                      args = mget(names(formals()),sys.frame(sys.nframe())))
  return(result.list)
}
#### Convenience wrapper around likGHK for covariates supplied *without* an
#### intercept column: prepends the intercept, fixes reorder = FALSE, and
#### forwards everything else unchanged.
likGHKXX <- function(pars, y, XX, locs, marginal, corr, effort, longlat, distscale, nrep, seed)
{
  design <- cbind(rep(1, length(y)), XX)  # intercept column first
  likGHK(pars = pars, y = y, x = design, locs = locs,
         marginal = marginal, corr = corr, effort = effort,
         longlat = longlat, distscale = distscale,
         nrep = nrep, reorder = FALSE, seed = seed)
}
#######################################################################################################################
#######################################################################################################################
#### OUTPUT FUNCTION: Prediction using GHK simulator: serial version
#######################################################################################################################
#######################################################################################################################
#### Output: n(number of prediction locations)*2 matrix
#### First Column: predicting value
#### Second Column: Estimated MSPE
#### For each prediction site j, the predictive pmf of the unobserved count m is
#### computed as a likelihood ratio: P(Y0 = m | data) is proportional to
#### exp(loglik(obs.y, m) - loglik(obs.y)), evaluated with the GHK simulator.
#### The support of m is found by searching outward from a nearest-neighbour
#### initial guess until the remaining mass is negligible.
#### NOTE(review): unlike predGHK.sf, missing components of ghkoptions are not
#### filled with defaults here; a partial ghkoptions list will fail downstream
#### -- confirm whether the same guards should be added.
predGHK <- function(
  obs.y, obs.x = NULL, obs.locs, pred.x = NULL, pred.locs, longlat = FALSE, distscale = 1,
  marginal, corr, obs.effort = 1, pred.effort = 1, estpar = NULL,
  corrpar0 = NULL, pred.interval = NULL,
  ghkoptions = list(nrep = c(100, 1000), reorder = FALSE, seed = 12345)
){
  ## Design matrix for the observed data, with an explicit intercept column.
  x <- as.data.frame(cbind(rep(1,length(obs.y)), obs.x))
  colnames(x)[1] <- c("Intercept")
  ## Input validation: locations must be tabular; efforts are recycled from a
  ## scalar and must then match the number of locations.
  if(!is.matrix(pred.locs) & !is.data.frame(pred.locs))
    stop("Input 'pred.locs' must be a data frame or matrix!")
  if(!is.matrix(obs.locs) & !is.data.frame(obs.locs))
    stop("Input 'obs.locs' must be a data frame or matrix!")
  if(length(obs.effort) == 1) obs.effort <- rep(obs.effort, nrow(obs.locs))
  if(!length(obs.effort) == nrow(obs.locs))
    stop("Sampling Effort must be equal to the number of sampling locations!")
  if(length(pred.effort) == 1) pred.effort <- rep(pred.effort, nrow(pred.locs))
  if(!length(pred.effort) == nrow(pred.locs))
    stop("Prediction Effort must be equal to the number of prediction locations!")
  #### First calculate the MLE and output the log-likelihood as denominator
  ## If no parameter estimates are supplied, fit the model; either way, loglik
  ## holds the log-likelihood of the observed data at estpar (likGHK returns
  ## the negative log-likelihood, hence the minus sign in the else branch).
  if(is.null(estpar)){
    MLE.est <- mleGHK(y = obs.y, x = x[,-1], locs = obs.locs, marginal = marginal, corr = corr, effort = obs.effort,
                      longlat = longlat, distscale = distscale, corrpar0 = corrpar0, ghkoptions = ghkoptions)
    loglik <- MLE.est$log.lik; estpar <- MLE.est$MLE
  }else{
    loglik <- -likGHK(pars = estpar, y = obs.y, x = x, locs = obs.locs, marginal = marginal, corr = corr,
                      effort = obs.effort, longlat = longlat, distscale = distscale,
                      nrep = ghkoptions$nrep[length(ghkoptions$nrep)],
                      reorder = ghkoptions$reorder, seed = ghkoptions$seed)
  }
  ## Design matrix at the prediction sites (intercept-only when pred.x is NULL);
  ## its columns are renamed to match the observed design matrix.
  if(is.null(pred.x)) {
    pred.x <- matrix(1, nrow = nrow(pred.locs), ncol = 1)
  }else {
    pred.x <- cbind(rep(1, nrow(pred.locs)) , pred.x)
  }
  pred.x <- as.data.frame(pred.x)
  names(pred.x) <- names(x)
  if(nrow(pred.x)!= nrow(pred.locs))
    stop("Number of prediction locations did not match rows of covariates")
  ## Initial guess m0 = n0 for each prediction site: the observed rate at the
  ## nearest sampling location (by FNN nearest-neighbour search), scaled by the
  ## prediction effort, rounded, plus one.
  if (requireNamespace("FNN", quietly = TRUE)) {
    if(nrow(pred.locs) == 1){
      indexloc <- which.min(FNN::get.knnx(pred.locs, obs.locs, 1)$nn.dist)
      m0 <- n0 <- round(unique(pred.effort)*obs.y[indexloc]/obs.effort[indexloc])+1
    }else{
      m0 <- n0 <- round(pred.effort*apply(pred.locs, 1, function(x) obs.y[which.min(FNN::get.knnx(t(as.matrix(x)),
           obs.locs, 1)$nn.dist)]/obs.effort[which.min(FNN::get.knnx(t(as.matrix(x)), obs.locs, 1)$nn.dist)]))+1
    }
  }else{
    stop("Please install {FNN} first!")
  }
  NPL <- length(m0)  # number of prediction locations
  #### m0 and n0: initial prediction values for probability search. Scalar or Vector
  ## Hard cap on the rightward search: five times the largest observed rate,
  ## scaled by the largest prediction effort.
  max.count <- ceiling(5*max(obs.y/obs.effort)*max(pred.effort))
  #### A for loop which cannot be avoided: each site needs its own search.
  ## Output has 2 columns (mean, variance), plus 4 interval bounds when a
  ## prediction-interval level is requested.
  if(is.null(pred.interval)){
    ans <- matrix(NA, nrow = NPL, ncol = 2)
  } else if(pred.interval >=0 & pred.interval<=1 ) {
    ans <- matrix(NA, nrow = NPL, ncol = 6)
  }else stop("Input pred.interval must be a number between 0 and 1!")
  nnrep <- length(ghkoptions$nrep)
  for(j in 1:NPL){
    ## Unnormalized predictive probability of count xtmp at site j:
    ## exp(loglik(obs.y augmented with xtmp) - loglik(obs.y)).
    tmpfun <- function(xtmp) {
      exp(-likGHK(pars = estpar, y = c(obs.y, xtmp), x = rbind(as.matrix(x), pred.x[j,]),
                  locs = rbind(obs.locs, pred.locs[j,]), marginal = marginal, corr = corr,
                  effort = c(obs.effort, pred.effort[j]), longlat = longlat, distscale = distscale,
                  nrep = ghkoptions$nrep[nnrep], reorder = ghkoptions$reorder, seed = ghkoptions$seed) - loglik)
    }
    ## MM1/MM2 rows hold c(prob, prob*count, prob*count^2, count); MM1 collects
    ## the search going left from m0, MM2 going right from n0.
    p.m0 <- p.n0 <- tmpfun(m0[j]); mu.m0 <- mu.n0 <- p.m0*m0[j]; mu2.m0 <- mu2.n0 <- p.m0*m0[j]^2
    MM1 <- matrix(0, nrow = 2, ncol = 4); MM2 <- matrix(0, nrow = 2, ncol = 4)
    MM1[1,] <- c(p.m0, mu.m0, mu2.m0, m0[j]); MM2[1,] <- c(p.n0, mu.n0, mu2.n0, n0[j])
    p.m0 <- tmpfun(m0[j]-1); mu.m0 <- p.m0*(m0[j]-1)
    mu2.m0 <- p.m0*(m0[j]-1)^2; MM1[2,] <- c(p.m0, mu.m0, mu2.m0, m0[j]-1)
    ## Extend leftward while the mass is non-negligible (above sqrt machine eps)
    ## or still increasing, stopping at count 0.
    while( (p.m0 > sqrt(.Machine$double.eps) | MM1[nrow(MM1), 2] > MM1[nrow(MM1)-1, 2]) & m0[j] > 1)
    {
      p.m0 <- tmpfun(m0[j]-2); mu.m0 <- p.m0*(m0[j]-2); mu2.m0 <- p.m0*(m0[j]-2)^2
      MM1 <- rbind(MM1, c(p.m0, mu.m0, mu2.m0, m0[j]-2)); m0[j] <- m0[j]-1
    }
    #### Search from n0 to the right, capped at max.count.
    p.n0 <- tmpfun(n0[j]+1); mu.n0 <- p.n0*(n0[j]+1); mu2.n0 <- p.n0*(n0[j]+1)^2
    MM2[2, ] <- c(p.n0, mu.n0, mu2.n0, n0[j]+1)
    while( (p.n0 > sqrt(.Machine$double.eps) | MM2[nrow(MM2), 2] > MM2[nrow(MM2)-1, 2]) & n0[j] < max.count)
    {
      p.n0 <- tmpfun(n0[j]+2); mu.n0 <- p.n0*(n0[j]+2); mu2.n0 <- p.n0*(n0[j]+2)^2
      MM2 <- rbind(MM2, c(p.n0, mu.n0, mu2.n0, n0[j]+2)); n0[j] <- n0[j]+1
    }
    ## Drop MM2's duplicate starting row, pool the searched support, and
    ## normalize so the probabilities sum to one.
    MM2 <- MM2[-1, ]; MM.all <- rbind(MM1, MM2); weight <- 1/sum(MM.all[,1])
    if(!is.null(pred.interval)){
      #### Equal Tail (alpha/2) Prediction Interval
      ## pd: (count value, normalized probability); pd1 adds the CDF over counts
      ## sorted in increasing order.
      pd <- cbind(MM.all[,4], MM.all[,1]*weight )
      pd1 <- pd[order(pd[,1]), ];pd1 <- cbind(pd1, cumsum(pd1[,2]))
      id1 <- suppressWarnings(ifelse(max(which(pd1[,3] <= (1-pred.interval)/2 ))==-Inf, 0,
                                     max(which(pd1[,3] <= (1-pred.interval)/2 ))))
      id2 <- min(which(pd1[,3] >= 1-(1-pred.interval)/2 ))
      ## NOTE(review): L1/U1 are *row indices* into pd1 (id1 and id2-1), which
      ## equal the count values only if the searched support starts at 1 and is
      ## consecutive; if the support can include 0 this is off by one -- confirm
      ## against pd1[id1, 1] / pd1[id2, 1].
      L1 <- id1; U1 <- id2-1
      #### Shortest Length Prediction Interval: take counts in decreasing order
      #### of probability until the requested coverage is reached.
      pd2 <- pd[order(pd[,2], decreasing = TRUE),]
      pd2 <- cbind(pd2, cumsum(pd2[,2]))
      id3 <- which(pd2[,3] >= pred.interval)[1]
      L2 <- min(pd2[1:id3,1]); U2 <- max(pd2[1:id3,1])
      ## Columns: predictive mean, variance E[m^2] - E[m]^2, then both intervals.
      ans[j, ] <- c(sum(MM.all[,2])*weight, sum(MM.all[,3]*weight)-(sum(MM.all[,2])*weight)^2,
                    L1, U1, L2, U2)
    }else{
      ans[j, ] <- c(sum(MM.all[,2])*weight, sum(MM.all[,3]*weight)-(sum(MM.all[,2])*weight)^2)
    }
  }
  ## Assemble the output list; predCount is the mean rounded to an integer.
  if(!is.null(pred.interval)){
    anslist <- (list(obs.locs = obs.locs,
                     obs.y = obs.y,
                     pred.locs = pred.locs,
                     predValue = ans[,1],
                     predCount = round(ans[,1]),
                     predVar = ans[,2],
                     ConfidenceLevel = pred.interval,
                     predInterval.EqualTail = ans[,3:4],
                     predInterval.Shortest = ans[,5:6]))
  }else{
    anslist <- (list(obs.locs = obs.locs,
                     obs.y = obs.y,
                     pred.locs = pred.locs,
                     predValue = ans[,1],
                     predCount = round(ans[,1]),
                     predVar = ans[,2]))
  }
  return(anslist)
}
#######################################################################################################################
#######################################################################################################################
#### OUTPUT FUNCTION: Prediction using GHK simulator: parallel version via snowfall.
#######################################################################################################################
#######################################################################################################################
#### Parallel (snowfall) version of predGHK: the per-site predictive-pmf search
#### is identical, but sites are dispatched to worker processes with sfSapply.
predGHK.sf <- function(
  obs.y, obs.x = NULL, obs.locs, pred.x = NULL, pred.locs, longlat = FALSE, distscale = 1,
  marginal, corr, obs.effort = 1, pred.effort = 1, estpar = NULL,
  corrpar0 = NULL, pred.interval = NULL,
  n.cores = 2, cluster.type="SOCK", ghkoptions = list(nrep = c(100,1000), reorder = FALSE, seed = 12345)
){
  ## Design matrix for the observed data, with an explicit intercept column.
  x <- as.data.frame(cbind(rep(1,length(obs.y)), obs.x))
  colnames(x)[1] <- c("Intercept")
  ## Fill in defaults for any ghkoptions component the caller omitted.
  if(is.null(ghkoptions[["nrep"]])) ghkoptions$nrep = c(100,1000)
  if(is.null(ghkoptions[["reorder"]])) ghkoptions$reorder = FALSE
  if(is.null(ghkoptions[["seed"]])) ghkoptions$seed = 12345
  ## Input validation, mirroring predGHK.
  if(!is.matrix(pred.locs) & !is.data.frame(pred.locs))
    stop("Input 'pred.locs' must be a data frame or matrix!")
  if(!is.matrix(obs.locs) & !is.data.frame(obs.locs))
    stop("Input 'obs.locs' must be a data frame or matrix!")
  if(length(obs.effort) == 1) obs.effort <- rep(obs.effort, nrow(obs.locs))
  if(!length(obs.effort) == nrow(obs.locs))
    stop("Sampling Effort must be equal to the number of sampling locations!")
  if(length(pred.effort) == 1) pred.effort <- rep(pred.effort, nrow(pred.locs))
  if(!length(pred.effort) == nrow(pred.locs))
    stop("Prediction Effort must be equal to the number of prediction locations!")
  #### First calculate the MLE and output the log-likelihood as denominator
  ## (likGHK returns the negative log-likelihood, hence the minus sign).
  if(is.null(estpar)){
    MLE.est <- mleGHK(y = obs.y, x = x[,-1], locs = obs.locs, marginal = marginal, corr = corr,
                      effort = obs.effort, longlat = longlat, distscale = distscale,
                      corrpar0 = corrpar0, ghkoptions = ghkoptions)
    loglik <- MLE.est$log.lik; estpar <- MLE.est$MLE
  }else{
    loglik <- -likGHK(pars = estpar, y = obs.y, x = x, locs = obs.locs, marginal = marginal, corr = corr,
                      effort = obs.effort, longlat = longlat, distscale = distscale,
                      nrep = ghkoptions$nrep[length(ghkoptions$nrep)],
                      reorder = ghkoptions$reorder, seed = ghkoptions$seed)
  }
  ## Design matrix at the prediction sites (intercept-only when pred.x is NULL).
  ## NOTE(review): the intercept column here is sized by nrow(pred.x), whereas
  ## the serial predGHK uses nrow(pred.locs); the two only agree when pred.x
  ## already has one row per prediction site -- confirm the intended behavior.
  if(is.null(pred.x)) {
    pred.x <- matrix(1, nrow = nrow(pred.locs), ncol = 1)
  }else {
    pred.x <- cbind(rep(1, nrow(pred.x)) , pred.x)
  }
  pred.x <- as.data.frame(pred.x)
  colnames(pred.x)[1] <- c("Intercept")
  names(pred.x) <- names(x)
  if(nrow(pred.x)!= nrow(pred.locs))
    stop("Number of prediction locations did not match the number of covariates")
  ## Initial guess m0 = n0 per site: nearest observed rate (FNN nearest
  ## neighbour) scaled by the prediction effort, rounded, plus one.
  if (requireNamespace("FNN", quietly = TRUE)) {
    if(nrow(pred.locs) == 1){
      indexloc <- which.min(FNN::get.knnx(pred.locs, obs.locs, 1)$nn.dist)
      m0 <- n0 <- round(unique(pred.effort)*obs.y[indexloc]/obs.effort[indexloc])+1
    }else{
      m0 <- n0 <- round(pred.effort*apply(pred.locs, 1, function(x) obs.y[which.min(FNN::get.knnx(t(as.matrix(x)),
           obs.locs, 1)$nn.dist)]/obs.effort[which.min(FNN::get.knnx(t(as.matrix(x)), obs.locs, 1)$nn.dist)]))+1
    }
  }else {
    stop("Please install {FNN} first!")
  }
  NPL <- length(m0)  # number of prediction locations
  ## Hard cap on the rightward search.
  max.count <- ceiling(5*max(obs.y/obs.effort)*max(pred.effort))
  nnrep <- length(ghkoptions$nrep)
  #### Begin to parallel: export the full workspace to the workers, then run the
  #### per-site search (same algorithm as in predGHK) with sfSapply.
  if (requireNamespace("snowfall", quietly = TRUE)) {
    snowfall::sfInit(parallel =TRUE, cpus = n.cores, type = cluster.type)
    suppressMessages(snowfall::sfExportAll(except = NULL, debug = FALSE))
    suppressMessages(snowfall::sfLibrary("gcKrig", character.only= TRUE))
    #snowfall::sfClusterSetupRNG()
    # sfClusterEvalQ( ls() )
    ## Worker task for prediction site j; returns c(mean, variance) or
    ## c(mean, variance, L1, U1, L2, U2) when an interval is requested.
    par.pred.inner <- function(j){
      ## Unnormalized predictive probability of count xtmp at site j.
      tmpfun <- function(xtmp) {
        exp(-likGHK(pars = estpar, y = c(obs.y, xtmp), x = rbind(as.matrix(x), pred.x[j,]),
                    locs = rbind(obs.locs, pred.locs[j,]), marginal = marginal, corr = corr,
                    effort = c(obs.effort, pred.effort[j]), longlat = longlat, distscale = distscale,
                    nrep = ghkoptions$nrep[nnrep], reorder = ghkoptions$reorder, seed = ghkoptions$seed) - loglik)
      }
      ## MM1/MM2 rows hold c(prob, prob*count, prob*count^2, count); MM1 is the
      ## leftward search from m0, MM2 the rightward search from n0.
      p.m0 <- p.n0 <- tmpfun(m0[j]); mu.m0 <- mu.n0 <- p.m0*m0[j]; mu2.m0 <- mu2.n0 <- p.m0*m0[j]^2
      MM1 <- matrix(0, nrow = 2, ncol = 4); MM2 <- matrix(0, nrow = 2, ncol = 4)
      MM1[1,] <- c(p.m0, mu.m0, mu2.m0, m0[j]); MM2[1,] <- c(p.n0, mu.n0, mu2.n0, n0[j])
      p.m0 <- tmpfun(m0[j]-1); mu.m0 <- p.m0*(m0[j]-1);
      mu2.m0 <- p.m0*(m0[j]-1)^2; MM1[2,] <- c(p.m0, mu.m0, mu2.m0, m0[j]-1)
      ## Extend leftward while mass is non-negligible or still increasing.
      while( (p.m0 > sqrt(.Machine$double.eps) | MM1[nrow(MM1), 2] > MM1[nrow(MM1)-1, 2]) & m0[j] > 1)
      {
        p.m0 <- tmpfun(m0[j]-2); mu.m0 <- p.m0*(m0[j]-2); mu2.m0 <- p.m0*(m0[j]-2)^2
        MM1 <- rbind(MM1, c(p.m0, mu.m0, mu2.m0, m0[j]-2)); m0[j] <- m0[j]-1
      }
      #### Search from n0 to the right, capped at max.count.
      p.n0 <- tmpfun(n0[j]+1); mu.n0 <- p.n0*(n0[j]+1); mu2.n0 <- p.n0*(n0[j]+1)^2
      MM2[2, ] <- c(p.n0, mu.n0, mu2.n0, n0[j]+1)
      while( (p.n0 > sqrt(.Machine$double.eps) | MM2[nrow(MM2), 2] > MM2[nrow(MM2)-1, 2]) & n0[j] < max.count)
      {
        p.n0 <- tmpfun(n0[j]+2); mu.n0 <- p.n0*(n0[j]+2); mu2.n0 <- p.n0*(n0[j]+2)^2
        MM2 <- rbind(MM2, c(p.n0, mu.n0, mu2.n0, n0[j]+2)); n0[j] <- n0[j]+1
      }
      ## Drop MM2's duplicate starting row, pool, and normalize to a pmf.
      MM2 <- MM2[-1, ]; MM.all <- rbind(MM1, MM2); weight <- 1/sum(MM.all[,1])
      if(!is.null(pred.interval)){
        #### Equal Tail (alpha/2) Prediction Interval
        pd <- cbind(MM.all[,4], MM.all[,1]*weight )
        pd1 <- pd[order(pd[,1]), ];pd1 <- cbind(pd1, cumsum(pd1[,2]))
        id1 <- suppressWarnings(ifelse(max(which(pd1[,3] <= (1-pred.interval)/2 ))==-Inf, 0,
                                       max(which(pd1[,3] <= (1-pred.interval)/2 ))))
        ## NOTE(review): L1/U1 are row indices into pd1, not count values looked
        ## up from pd1[,1]; same caveat as in the serial predGHK -- confirm.
        id2 <- min(which(pd1[,3] >= 1-(1-pred.interval)/2 )); L1 <- id1; U1 <- id2-1
        #### Shortest Length Prediction Interval: highest-probability counts
        #### until the requested coverage is reached.
        pd2 <- pd[order(pd[,2], decreasing = TRUE),]
        pd2 <- cbind(pd2, cumsum(pd2[,2]))
        id3 <- which(pd2[,3] >= pred.interval)[1]
        L2 <- min(pd2[1:id3,1]); U2 <- max(pd2[1:id3,1])
        ans <- c(sum(MM.all[,2])*weight, sum(MM.all[,3]*weight)-(sum(MM.all[,2])*weight)^2,
                 L1, U1, L2, U2)
      }else{
        ans <- c(sum(MM.all[,2])*weight, sum(MM.all[,3]*weight)-(sum(MM.all[,2])*weight)^2)
      }
      return(ans)
    }
    out = snowfall::sfSapply(1:NPL, par.pred.inner)
    snowfall::sfStop()
  }else{
    stop("Please install {snowfall} first before using this function!")
  }
  ## sfSapply returns one column per site; transpose so rows index sites.
  ans <- t(out)
  if(!is.null(pred.interval)){
    anslist <- (list(obs.locs = obs.locs,
                     obs.y = obs.y,
                     pred.locs = pred.locs,
                     predValue = ans[,1],
                     predCount = round(ans[,1]),
                     predVar = ans[,2],
                     ConfidenceLevel = pred.interval,
                     predInterval.EqualTail = ans[,3:4],
                     predInterval.Shortest = ans[,5:6]))
  }else{
    anslist <- (list(obs.locs = obs.locs,
                     obs.y = obs.y,
                     pred.locs = pred.locs,
                     predValue = ans[,1],
                     predCount = round(ans[,1]),
                     predVar = ans[,2]))
  }
  return(anslist)
}
|
047f9c62561cd90fdce30a83faa04afa360fe135 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.management/man/servicequotas_list_reque_servi_quota_chang_histo_by_quota.Rd | a8474845e03286db2ae4c8094a412c68b024ae60 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 1,147 | rd | servicequotas_list_reque_servi_quota_chang_histo_by_quota.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/servicequotas_operations.R
\name{servicequotas_list_requested_service_quota_change_history_by_quota}
\alias{servicequotas_list_requested_service_quota_change_history_by_quota}
\title{Retrieves the quota increase requests for the specified quota}
\usage{
servicequotas_list_requested_service_quota_change_history_by_quota(
ServiceCode,
QuotaCode,
Status = NULL,
NextToken = NULL,
MaxResults = NULL
)
}
\arguments{
\item{ServiceCode}{[required] The service identifier.}
\item{QuotaCode}{[required] The quota identifier.}
\item{Status}{The status value of the quota increase request.}
\item{NextToken}{The token for the next page of results.}
\item{MaxResults}{The maximum number of results to return with a single call. To retrieve
the remaining results, if any, make another call with the token returned
from this call.}
}
\description{
Retrieves the quota increase requests for the specified quota.
See \url{https://www.paws-r-sdk.com/docs/servicequotas_list_requested_service_quota_change_history_by_quota/} for full documentation.
}
\keyword{internal}
|
d416e11b9a5113d783b557231fe6b6df7134078a | 573217e72d2d614b6575d0751397088e87418f79 | /util.R | 780242117e8438f975163a756d4056ffa3ba6055 | [] | no_license | hal-henningsmoen/ExData_Plotting1 | c350fa15d1b62d06f1a4135dfcd992a43478f3f4 | bc897fb5e24b0da5ca54eac3c43b323a1a66aadd | refs/heads/master | 2021-01-22T13:03:48.344712 | 2017-02-03T17:03:27 | 2017-02-03T17:03:27 | 45,550,261 | 0 | 0 | null | 2015-11-04T16:02:47 | 2015-11-04T16:02:47 | null | UTF-8 | R | false | false | 1,193 | r | util.R | library(dplyr)
library(lubridate)
# Load the household power consumption data:
# - reads the semicolon-separated file in `filename` into a data frame
# - subsets to the two target days of the analysis (Feb 1 and Feb 2, 2007),
#   accepting both zero-padded and unpadded day/month spellings
# - builds a POSIXct `datetime` column from the Date/Time strings and drops
#   the original Date and Time columns
# - coerces every remaining measurement column to numeric, so "?" placeholders
#   become NA (with the usual coercion warning)
load_electrical_data <- function(filename) {
  hpc <- read.csv(filename, sep = ";", as.is = TRUE)
  targ_date_strings <-
    c(
      "1/2/2007","01/02/2007","01/2/2007","1/02/2007", "2/2/2007","02/02/2007","02/2/2007", "2/02/2007"
    )
  hpc_just_targ_dates <- filter(hpc, Date %in% targ_date_strings)
  hpc_just_targ_dates <-
    mutate(hpc_just_targ_dates, datetime = parse_date_time(paste(Date, Time), '%d/%m/%y %H%M%S'))
  hpc_just_targ_dates <- select(hpc_just_targ_dates,-(Date:Time))
  # BUG FIX: the original `while (i <= 6)` loop converted only the first six
  # columns, leaving any column beyond the sixth (presumably Sub_metering_3 in
  # this dataset -- confirm against the raw file) unconverted when it is read
  # as character. Convert every column except datetime instead; columns that
  # are already numeric pass through unchanged.
  meas_cols <- setdiff(names(hpc_just_targ_dates), "datetime")
  for (col in meas_cols) {
    hpc_just_targ_dates[[col]] <- as.numeric(hpc_just_targ_dates[[col]])
  }
  return(hpc_just_targ_dates)
}
|
0afcfa6abeaef354daabd71ee6e0e613a7b70a79 | 094e952da4fa8698b04fb88b69fbf67668218d24 | /code/ch.5/e/ex5.r | c95a5830d63338da458ccb6ea8c3909874259b63 | [
"MIT"
] | permissive | rhyeu/study_rstan | 42a773beef840f56f64fcd20c5b1b24f88d45e1b | a5b998772358ba64996bc7ca775566f0706fa8f3 | refs/heads/master | 2021-07-08T15:51:37.488890 | 2020-10-04T07:10:03 | 2020-10-04T07:10:03 | 195,388,081 | 0 | 0 | null | null | null | null | UHC | R | false | false | 776 | r | ex5.r | library(ggplot2)
# after run-model5-6.R
# Posterior predictive check: observed values vs posterior-median predictions
# with 80% intervals (10%-90% quantiles), grouped by factor A.
load('D:/006966_wonderful2/02. 원고개발/실습/ch.5/result-model5-6.RData')
# Extract posterior draws from the fitted Stan model object `fit`.
ms <- rstan::extract(fit)
# 10/50/90% quantiles of the predictive draws, one row per observation.
d_qua <- t(apply(ms$m_pred, 2, quantile, prob=c(0.1, 0.5, 0.9)))
colnames(d_qua) <- c('p10', 'p50', 'p90')
# Bind the quantiles to the original data `d` (loaded from the RData file).
d_qua <- data.frame(d, d_qua)
d_qua$A <- as.factor(d_qua$A)
# Observed (M) vs predicted median with interval bars; the dashed y = x line
# marks perfect prediction.
p <- ggplot(data=d_qua, aes(x=M, y=p50, ymin=p10, ymax=p90, shape=A, fill=A))
p <- p + coord_fixed(ratio=1, xlim=c(10, 80), ylim=c(10, 80))
p <- p + geom_pointrange(size=0.8)
p <- p + geom_abline(aes(slope=1, intercept=0), color='black', alpha=3/5, linetype='31')
p <- p + scale_shape_manual(values=c(21, 24))
p <- p + labs(x='Observed', y='Predicted')
ggsave(file='D:/006966_wonderful2/02. 원고개발/실습/ch.5/e/fig-ex5.png', plot=p, dpi=300, w=5, h=4)
|
a5d9742c9fd9ef528a98622343f5606d9dca1ff4 | edd22bbc5848892113a519ad3490d6d5709772ed | /plot4.R | 7ef6c5c4e0ad1e2d03502c44aa7bca5298fdb08a | [] | no_license | ivanobando67/cior | f56e3aa9c1323aaeb9365218f96fe0cb875f6ec7 | 2585a60183afffe930be4e1c528fe0fc81c179b1 | refs/heads/master | 2022-11-29T20:45:58.319068 | 2020-08-16T06:33:08 | 2020-08-16T06:33:08 | 283,086,476 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 859 | r | plot4.R | # Tarea 2
# Carlos Obando
#----------------
# Pregunta 4.
# En todo Estados Unidos,
# ¿cómo han cambiado las emisiones de fuentes relacionadas
# con la combustión de carbón entre 1999 y 2008?
#-------------------------------------------------------------------------
setwd("C:/CIOR/UNIVERSIDAD_CENTRAL/COURSERA/DATA")
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
str(SCC)
table(SCC$SCC.Level.One)
bdd <- merge(NEI,SCC,by="SCC")
bdd1 <- subset(bdd,SCC.Level.One=="External Combustion Boilers")
#Plot 4
png("plot4.png",width=480,height=480)
boxplot(bdd1$Emissions~factor(bdd1$year),
main="Emisiones por combustión de carbón",
xlab="Años",ylab="Emisiones",
col=c("blue","red","green","pink"),
ylim=c(0,10000))
dev.off()
|
c0169abf6622de3e4c98cc9a7846c95d88b48010 | 4c27422b8a417b0c3559d89eb3d17a36feb00459 | /vinelandcomparisons.R | f254d1dca35e1d21d391e53e533468efb5587c0b | [] | no_license | landiisotta/NDAR_data | 82f67730e95701b72247a46e2376ddc2b8be8002 | 4f246d7d710533d37de575bc362be91640cb8f5a | refs/heads/master | 2022-07-12T15:35:19.618190 | 2020-05-15T20:01:33 | 2020-05-15T20:01:33 | 255,643,520 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,774 | r | vinelandcomparisons.R | require(ggplot2)
require(reshape2)
library(RColorBrewer)
require(ComplexHeatmap)
require(ggalluvial)
#########################FUNCTIONS#############################
# Compare missing-data percentages across clusters for one period/level.
# p:   period label ('P1', 'P2' or 'P3'), used to build the csv file name.
# lev: level label ('L1' or 'L2'), also part of the file name.
# Reads VINELANDmiss_perc_<lev><p>.csv from `folder_name` (a global defined
# later in this file) and, for every percentage column, prints a t-test when
# there are exactly two clusters, otherwise a one-way ANOVA plus pairwise
# t-tests. Output goes to the console only; nothing is returned.
# NOTE: the column loop assumes the first column is an id and the last is
# `cluster` -- presumably the layout of the csv; confirm against the file.
compare_missinfo <- function(p, lev){
  print(sprintf('Check missing data confounders period %s, subdomains', p))
  percDf <- read.table(file.path(folder_name,
                                 paste('VINELANDmiss_perc_', lev, p, '.csv', sep='')),
                       header=TRUE,
                       as.is=TRUE,
                       sep=',')
  percDf$cluster <- as.factor(percDf$cluster)
  # Iterate over the percentage columns (all but the first and last).
  for (col in names(percDf)[2:(length(names(percDf))-1)]){
    print(col)
    if (length(unique(percDf$cluster)) == 2){
      print(t.test(percDf[, col]~percDf$cluster))
    } else{
      print(summary(aov(percDf[, col]~percDf$cluster)))
      print(pairwise.t.test(percDf[, col], percDf$cluster))}
  }
}
# Check for confounders: SEX, SITE, PHENO
# For the chosen level (lev == 'L1' uses df$cluster_subdomain, anything else
# uses df$cluster_domain), prints chi-square tests of cluster membership
# against sex, site and phenotype, then compares interview_age across
# clusters with a t-test (two clusters) or ANOVA + pairwise t-tests.
# All results are printed to the console; nothing is returned.
# NOTE: the argument `p` (period label) is currently unused in this function.
confounders <- function(df, p, lev){
  if (lev == 'L1'){
    # SEX: association between sex and subdomain cluster membership.
    print('Compare sex for subdomain clusters')
    tabsex_sub <- table(df$sex, df$cluster_subdomain)
    print(chisq.test(tabsex_sub))
    # SITE: association between data-collection site and cluster membership.
    print('Compare sites for subdomain clusters')
    tabsite_sub <- table(df$site, df$cluster_subdomain)
    print(chisq.test(tabsite_sub))
    # PHENOTYPE: association between phenotype and cluster membership.
    print('Compare phenotypes for subdomain clusters')
    tabpheno_sub <- table(df$phenotype, df$cluster_subdomain)
    print(chisq.test(tabpheno_sub))
    # INTERVIEW AGE: t-test for two clusters, otherwise ANOVA + pairwise tests.
    if (length(unique(df$cluster_subdomain))==2){
      print(t.test(df$interview_age~df$cluster_subdomain))
    } else{
      print(summary(aov(interview_age~cluster_subdomain, df)))
      print(pairwise.t.test(df$interview_age, df$cluster_subdomain))}
  } else {
    # Same battery of tests, against the domain-level clusters.
    #SEX
    print('Compare sex for domain clusters')
    tabsex_dom <- table(df$sex, df$cluster_domain)
    print(chisq.test(tabsex_dom))
    #SITE
    print('Compare sites for domain clusters')
    tabsite_dom <- table(df$site, df$cluster_domain)
    print(chisq.test(tabsite_dom))
    #PHENO
    print('Compare phenotype for domain clusters')
    tabpheno_dom <- table(df$phenotype, df$cluster_domain)
    print(chisq.test(tabpheno_dom))
    #AGE
    if (length(unique(df$cluster_domain))==2){
      print(t.test(df$interview_age~df$cluster_domain))
    } else{
      print(summary(aov(interview_age~cluster_domain, df)))
      print(pairwise.t.test(df$interview_age, df$cluster_domain))
    }
  }
}
# Compare clusters on the Vineland scores themselves.
# For lev == 'L1' the subdomain feature columns (global `subdomain_features`)
# are compared across df$cluster_subdomain; otherwise the domain feature
# columns (`domain_features`) across df$cluster_domain. Each feature gets a
# faceted boxplot plus a one-sided t-test (two clusters) or ANOVA with
# pairwise t-tests (more clusters). Results are printed/plotted only.
clust_comparison <- function(df, p, lev){
  if (lev == 'L1'){
    sprintf('Comparing subdomain scores at period $s', p)
    # Long format: one row per (subject, feature) with the cluster label.
    df_long <- melt(subset(df, select=c('subjectkey', 'cluster_subdomain', subdomain_features)),
                    id.vars=c('subjectkey', 'cluster_subdomain'))
    df_long$cluster_subdomain <- as.character(df_long$cluster_subdomain)
    print(ggplot(df_long, aes(x=variable, y=value, fill=cluster_subdomain)) +
            geom_boxplot() +
            facet_wrap(~variable, scale="free") +
            coord_cartesian(ylim = c(min(df_long$value), max(df_long$value))) +
            ggtitle(sprintf('Subdomain features (VINELAND) -- period %s', p)))
    # Per-feature tests across clusters (one-sided 'less' for two clusters).
    for (col in subdomain_features){
      print(col)
      if (length(unique(df$cluster_subdomain))==2){
        print(t.test(df[, col]~df$cluster_subdomain, alternative='less'))
      } else{
        print(summary(aov(df[, col]~df$cluster_subdomain)))
        print(pairwise.t.test(df[, col], df$cluster_subdomain))}
    }
  } else {
    # Same analysis at the domain level.
    sprintf('Comparing domain scores at period %s', p)
    df_long <- melt(subset(df, select=c('subjectkey', 'cluster_domain', domain_features)),
                    id.vars=c('subjectkey', 'cluster_domain'))
    df_long$cluster_domain <- as.character(df_long$cluster_domain)
    print(ggplot(df_long, aes(x=variable, y=value, fill=cluster_domain)) +
            geom_boxplot() +
            facet_wrap(~variable, scale="free") +
            coord_cartesian(ylim = c(min(df_long$value), max(df_long$value))) +
            ggtitle(sprintf('Domain features (VINELAND) -- period %s', p)))
    for (col in domain_features){
      print(col)
      if (length(unique(df$cluster_domain))==2){
        print(t.test(df[, col]~df$cluster_domain, alternative='less'))
      } else{
        print(summary(aov(df[, col]~df$cluster_domain)))
        print(pairwise.t.test(df[, col], df$cluster_domain))}
    }
  }}
# Compare feature scores *within* each cluster (rather than across clusters):
# for every cluster, runs pairwise t-tests between the Vineland features and
# plots per-cluster boxplots of all features. lev == 'L1' uses the subdomain
# features/clusters, anything else the domain ones. Console/plot output only.
feat_comparison <- function(df, p, lev){
  if (lev=='L1'){
    # Long format: one row per (subject, feature) with the cluster label.
    df_long <- melt(subset(df, select=c('subjectkey', 'cluster_subdomain', subdomain_features)),
                    id.vars=c('subjectkey', 'cluster_subdomain'))
    df_long$cluster_subdomain <- as.character(df_long$cluster_subdomain)
    # Within each cluster: pairwise t-tests between features.
    for (cl in sort(unique(df_long$cluster_subdomain))){
      print(sprintf('Analyzing cluster %s', cl))
      print(pairwise.t.test(df_long$value[which(df_long$cluster_subdomain==cl)],
                            df_long$variable[which(df_long$cluster_subdomain==cl)]))
    }
    print(ggplot(df_long, aes(x=cluster_subdomain, y=value, fill=variable)) +
            geom_boxplot() +
            facet_wrap(~cluster_subdomain, scale="free") +
            coord_cartesian(ylim = c(min(df_long$value), max(df_long$value))) +
            ggtitle(sprintf('Subdomain features for each clusters (VINELAND) -- period %s', p)))} else{
      # Same analysis at the domain level.
      df_long <- melt(subset(df, select=c('subjectkey', 'cluster_domain', domain_features)),
                      id.vars=c('subjectkey', 'cluster_domain'))
      df_long$cluster_domain <- as.character(df_long$cluster_domain)
      for (cl in sort(unique(df_long$cluster_domain))){
        print(sprintf('Analyzing cluster %s', cl))
        print(pairwise.t.test(df_long$value[which(df_long$cluster_domain==cl)],
                              df_long$variable[which(df_long$cluster_domain==cl)]))
      }
      print(ggplot(df_long, aes(x=cluster_domain, y=value, fill=variable)) +
              geom_boxplot() +
              facet_wrap(~cluster_domain, scale="free") +
              coord_cartesian(ylim = c(min(df_long$value), max(df_long$value))) +
              ggtitle(sprintf('Domain features for each clusters (VINELAND) -- period %s', p)))
    }
}
# Heatmaps for replicability.
# Side-by-side train/test subject-distance heatmaps for one period `p`
# ('P1'/'P2'/'P3') and one level `lev` ('subdomain' or 'domain'), each
# annotated with its cluster assignment. Reads
# VINELAND_dist<LEV><TR|TS><p>.csv from `folder_name`; the csv is expected to
# hold the distance matrix plus one cluster column (cluster_<lev>).
# Draws to the active graphics device; nothing is returned.
# Refactor: the TR and TS halves were verbatim copy-paste duplicates; the
# shared logic now lives in the local helper make_heat().
replheat <- function(p, lev){
  ## Name of the cluster-assignment column in the csv files.
  clcol <- paste('cluster', lev, sep = '_')
  ## Cluster color palette, shared by both halves (indexed by cluster id).
  colSide <- brewer.pal(9, "Set1")[3:9]
  ## Read one half (tag = 'TR' for train, 'TS' for test) and build its
  ## annotated (but not clustered) heatmap.
  make_heat <- function(tag){
    distdf <- read.table(file.path(folder_name,
                                   paste('VINELAND_dist', toupper(lev), tag, p, '.csv', sep='')),
                         header = TRUE,
                         as.is = TRUE,
                         sep = ',',
                         row.names = 1)
    clust <- distdf[[clcol]]
    ## Distance matrix = everything except the cluster column.
    distmat <- as.matrix(distdf[, names(distdf) != clcol, drop = FALSE])
    ## One color per observed cluster id, named by the id.
    col_v <- list(clusters = colSide[sort(unique(clust))])
    names(col_v$clusters) <- as.character(sort(unique(clust)))
    Heatmap(distmat,
            heatmap_legend_param = list(
              title = paste('VINELAND', '\ndist mat ', tag, sep=''),
              at = seq(min(distmat), max(distmat), 0.5)),
            show_row_names = FALSE,
            show_column_names = FALSE,
            show_row_dend = FALSE,
            show_column_dend = FALSE,
            cluster_rows = FALSE,
            cluster_columns = FALSE,
            left_annotation = HeatmapAnnotation(clusters = clust,
                                                col = col_v, which = 'row'),
            top_annotation = HeatmapAnnotation(clusters = clust,
                                               col = col_v, which = 'column',
                                               show_legend = FALSE))
  }
  hTR <- make_heat('TR')
  hTS <- make_heat('TS')
  ## Lay the two heatmaps out side by side under a common title:
  ## train on the left (green backdrop), test on the right (blue backdrop).
  grid.newpage()
  title = sprintf('%s feature Level %s distance matrices train/test comparisons', 'VINELAND', lev)
  grid.text(title, x=unit(0.5, 'npc'), y=unit(0.8, 'npc'), just='centre')
  pushViewport(viewport(x = 0, y = 0.75, width = 0.5, height = 0.5, just = c("left", "top")))
  grid.rect(gp = gpar(fill = "#00FF0020"))
  draw(hTR, newpage = FALSE)
  popViewport()
  pushViewport(viewport(x = 0.5, y = 0.75, width = 0.5, height = 0.5, just = c("left", "top")))
  grid.rect(gp = gpar(fill = "#0000FF20"))
  draw(hTS, newpage = FALSE)
  popViewport()
}
## Post-hoc comparisons, replication of clusters, new clustering
## Vineland L1, L2 levels at P1, P2, P3
# Globals used by the functions above: the data/results directory and the
# Vineland feature column names at the two levels.
folder_name <- './out'
domain_features <- c('communicationdomain_totalb',
                     'livingskillsdomain_totalb',
                     'socializationdomain_totalb')
subdomain_features <- c('receptive_vscore', 'expressive_vscore',
                        'personal_vscore', 'domestic_vscore',
                        'community_vscore', 'interprltn_vscore', 'playleis_vscore',
                        'copingskill_vscore')
# Read missing data patterns
# Main driver: for each period, draw the train/test replicability heatmaps.
# The remaining analyses (missingness, confounders, cluster/feature
# comparisons, alluvial plots) are kept below but currently commented out.
for (p in c('P1', 'P2', 'P3')){
  # Results replicability
  replheat(p, 'subdomain')
  replheat(p, 'domain')
  # Compare missing info
  # compare_missinfo(p, 'L1')
  # compare_missinfo(p, 'L2')
  # df <- read.table(file.path(folder_name,
  #                            paste('VINELANDdata', p, '.csv', sep='')),
  #                  header=TRUE,
  #                  as.is=TRUE,
  #                  sep=',')
  # # Confounders
  # confounders(df, p, 'L1')
  # confounders(df, p, 'L2')
  #
  # # Compare instrument mean scores
  # clust_comparison(df, p, 'L1')
  # clust_comparison(df, p, 'L2')
  #
  # # Compare feature scores within the same cluster
  # feat_comparison(df, p, 'L1')
  # feat_comparison(df, p, 'L2')
  #
  # # Plot alluvial plot
  # sprintf('Alluvial plot for period %s between Vineland domains and subdomains', p)
  # alldf <- subset(df, select=c(subjectkey, cluster_subdomain, cluster_domain, sex))
  # alldf <- alldf[order(alldf$sex),]
  # print(is_alluvia_form(alldf))
  # plot(ggplot(alldf,
  #             aes(axis1 = cluster_subdomain, axis2 = cluster_domain)) +
  #        geom_alluvium(aes(fill=sex), width = 1/12) +
  #        geom_stratum(width = 1/12, fill = "black", color = "grey") +
  #        geom_label(stat = "stratum", infer.label = TRUE) +
  #        scale_x_discrete(limits = c("Level 1", "Level 2"), expand = c(.05, .05)) +
  #        scale_fill_brewer(type = "qual", palette = "Set1") +
  #        ggtitle(sprintf('Subject movements between Vineland subdomains and domains at period %s', p)))
  #
  # df$cluster <- paste(df$cluster_subdomain, df$cluster_domain, sep='-')
}
|
6a41bf0a18b441c5906dd7880cdf77ba73ebe025 | 5355ce6341489f05dc5894a70cf5cff1f951a194 | /man/dbCreateUserEM.Rd | 1ca402de02a6ddc744415058cb99825082d55fcf | [] | no_license | AndreMikulec/econModel | 5032565f1722275425f75b55811493d45bf87f8c | 22b8507838116d3e33b6e40cf891988ad104ac7b | refs/heads/master | 2023-06-03T19:23:00.544927 | 2021-06-26T07:38:20 | 2021-06-26T07:38:20 | 303,683,123 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,238 | rd | dbCreateUserEM.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AmerAssocIndividInvestorsAAII.R
\name{dbCreateUserEM}
\alias{dbCreateUserEM}
\title{User Creation}
\usage{
dbCreateUserEM(
connName,
user,
attributes = c("LOGIN"),
password = user,
env,
display = TRUE,
exec = TRUE
)
}
\arguments{
\item{connName}{String. Default is "connEM". Contains the name of the variable that contains the name of the "connection" in the environment "env".}
\item{user}{String. Required. Potential user in the database.}
\item{attributes}{vector of Strings. User attributes.}
\item{password}{String. Defaults to "user".}
\item{env}{Environment. Default is the .Global environment. This is the environment to return the connection object "connEM".}
\item{display}{Logical. Whether to display the query (defaults to \code{TRUE}).}
\item{exec}{Logical. Whether to execute the query (defaults to \code{TRUE}).}
}
\value{
TRUE(success) or Error(failure)
}
\description{
Create a user in the database.
}
\examples{
\dontrun{
# Does not check if the user already exists
# A user who manages a [future] "personal" database . . .
dbCreateUserEM(user = "r_user", attributes = c("LOGIN", "CREATEDB", "CREATEROLE"))
}
}
|
2bf9a0c71f3da6afec04fd258d23b979048a94bb | 8820fc36c5a3b9248bd333327808c7e8ed808ac1 | /Survival.R | a323b1da84f4019ca4ad80f15dd2177b01f93454 | [] | no_license | tranwin/Functional-Genomics | 120f95ad6d2ff51cf0ae8dfb9e8d4b1da892ff75 | a577a230d85f12c1fac5375336760203251375b6 | refs/heads/master | 2022-09-01T13:52:56.050264 | 2022-08-17T15:20:59 | 2022-08-17T15:20:59 | 108,777,942 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 863 | r | Survival.R | # Load the bioconductor installer.
# Survival analysis of three TCGA cohorts (BRCA, OV, GBM) using RTCGA data.
# NOTE(review): %>% (magrittr/dplyr), Surv()/coxph()/survfit() (survival) and
# ggsurvplot() (survminer) are used below but never attached in this script —
# confirm these packages are loaded elsewhere before running.
source("https://bioconductor.org/biocLite.R")
# Install the packages
# NOTE(review): RTCGA.mRNA is installed but never loaded or used below.
biocLite("RTCGA")
biocLite("RTCGA.clinical")
biocLite("RTCGA.mRNA")
# Load the packages
library(RTCGA)
library(RTCGA.clinical)
# Explore the data
dim(BRCA.clinical)
names(BRCA.clinical)
# Create the clinical data object: survival times + vital status per cohort
clin <- survivalTCGA(BRCA.clinical, OV.clinical, GBM.clinical,
extract.cols="admin.disease_code")
# Show the head
head(clin, 5)
# Cross-tabulate disease code against vital status, with margin totals
xtabs(~admin.disease_code+patient.vital_status, data=clin) %>% addmargins()
# Run Cox proportional-hazards model with disease code as the covariate
coxph(Surv(times, patient.vital_status)~admin.disease_code, data=clin)
# Fit the survival curve
sfit <- survfit(Surv(times, patient.vital_status)~admin.disease_code, data=clin)
# Yearly survival estimates over the first five years
summary(sfit, times=seq(0,365*5,365))
ggsurvplot(sfit, conf.int=TRUE, pval=TRUE)
|
602a4f98fff8fafb8e8a82fde9ffc70c105dd121 | 32327508ebb4bcee65d55b928c627078b31abfd4 | /rawcode/2_lightgbm.R | 186709720ca431ab2360f3e0e0c996dbaf4f2fe8 | [] | no_license | serzzh/kaggleInstacart | b373569482e6242b304423d3d824122408cfe721 | bd0193d6b48d88b77b3abd8fcdfc2f1c2067c923 | refs/heads/master | 2021-01-01T06:56:38.074608 | 2019-12-23T12:56:23 | 2019-12-23T12:56:23 | 97,555,268 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,219 | r | 2_lightgbm.R |
###########################################################################################################
#
# Kaggle Instacart competition
# Fabien Vavrand, June 2017
# Simple xgboost starter, score 0.3791 on LB
# Products selection is based on product by product binary classification, with a global threshold (0.21)
#
###########################################################################################################
library(data.table)
library(dplyr)
library(tidyr)
# Load Data ---------------------------------------------------------------
path <- "../input"
data <- readRDS(file.path(path, "data.RDS"))
# Feature tuning - remove features to study influence
#feat <- read.csv(file.path(path, "features.csv"))
# rem_feat = c( 'user_order_recency',
# 'prod_mean_add_to_cart_order',
# 'prod_days_since_prior',
# 'user_product_diversity',
# 'prod_penetration',
# 'prod_double_penetration',
# 'weekly_orders')
#data <- data[!colnames(data) %in% feat[10:27]]
# Train / Test datasets ---------------------------------------------------
train <- as.data.frame(data[data$eval_set == "train",])
train$eval_set <- NULL
train$user_id <- NULL
train$reordered[is.na(train$reordered)] <- 0
test <- as.data.frame(data[data$eval_set == "test",])
test$eval_set <- NULL
test$user_id <- NULL
test$reordered <- NULL
rm(data)
gc()
# Model -------------------------------------------------------------------
library(lightgbm)
require(Ckmeans.1d.dp)
require(MLmetrics)
source('include.R')
## 20% of the sample size
smp_size <- floor(0.4 * nrow(train))
## set the seed to make your partition reproductible
set.seed(123)
train_ind <- sample(seq_len(nrow(train)), size = smp_size)
subtrain <- train[train_ind,]
valid <- train[-train_ind,] %>% sample_frac(0.3)
rm(train)
params <- list(
objective = "binary",
metric = "binary_logloss",
learning_rate = 0.1,
verbose = 2,
#nfold = 5,
min_data =5
#nrounds =90
#"eta" = 0.1,
#"max_depth" = 8
# "min_child_weight" = 10,
# "gamma" = 0.70,
# "subsample" = 0.77,
# "colsample_bytree" = 0.95,
# "alpha" = 2e-05,
# "lambda" = 10,
# "watchlist" = watchlist
)
X <- lgb.Dataset(as.matrix(subtrain %>% select(-reordered, -order_id, -product_id, -aisle, -department)), label = subtrain$reordered)
#rm(subtrain)
#model <- lgb.train(params, X, 90)
subtrain$prob <- predict(model,X)
#model <- xgb.load('xgboost.model')
lgb.save(model,'lgb.model')
importance <- lgb.importance(model)
#feat<-importance$Feature
#xgb.ggplot.importance(importance)
rm(X, Y, subtrain)
gc()
#train-logloss last 0.2434
## Threshold prediction-----------------------------------------------
#subtrain$prob <- predict(model, X)
#rm(X)
## adding metrics to training dataset
##subtrain<-add_metrics(subtrain)
# Validation, initial (threshold=0.21, Pc=0.389, Rc=0.51, f1=0.4418), last=0.4424329
print(my_validation(model, valid, 0.21, 'lgbm'))
source('include.R')
source('f1.R')
# Apply models -------------------------------------------------------------
X <- xgb.DMatrix(as.matrix(test %>% select(-order_id, -product_id, -aisle, -department)))
test$prob <- predict(model, X)
# Apply threshold
result <- apply_threshold(test)
#print(test, n=200)
#test$reordered <- (test$reordered > 0.20) * 1
submission <- result %>%
filter(reordered == 1) %>%
group_by(order_id) %>%
summarise(
products = paste(product_id, collapse = " ")
#n_prod = n()-sum(product_id=='None')
)
#submission[submission$n_prod==0 | is.na(submission$n_prod),]$products<-'None'
missing <- data.frame(
order_id = unique(test$order_id[!test$order_id %in% submission$order_id]),
products = "None"
)
submission <- submission %>% bind_rows(missing) %>% arrange(order_id)
submission$n_prod<-NULL
submission[submission$products=='' | is.na(submission$products),]<-'None'
write.csv(submission, file = "submit.csv", row.names = F)
|
ae92382ff24af4c4ceb1fc4ce81fec01278568b6 | ad684b223e22a682021ca052851b663125f2919f | /3_raster_data.R | 3b7f174e9938d5a228757dc1a0053b57ed439634 | [] | no_license | alschel/andan-cartography-2018 | ad883f7bb9e6b5bae774dfa0e3ed415c50f87ae2 | d0a19b16bf0b01318e7b8def8334f368204f9d85 | refs/heads/master | 2020-03-23T14:19:04.735402 | 2018-08-03T14:32:52 | 2018-08-03T14:32:52 | 141,668,373 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,313 | r | 3_raster_data.R | # Часть 3. Растровые данные
# Author: Alexander Sheludkov, Institute of Geography RAS
# Date: 25.07.2018
#=================================
# 3.2 Raster image classes
#=================================
# The raster package can create objects of three classes: RasterLayer, RasterStack and RasterBrick
# A RasterLayer consists of 1 layer (1 variable)
r <- raster(ncol=10, nrow=10, xmx=-80, xmn=-150, ymn=20, ymx=60) # creates an "empty" RasterLayer object
r
# In this example we created a RasterLayer consisting of 100 cells (10*10).
# Note the coordinate reference system of the object.
# In what units is the raster resolution (the size of one cell) given?
values(r) # The cells are empty for now, let's fill them with values
values(r) <- 1:ncell(r)
plot(r)
# RasterStack and RasterBrick are classes of raster objects with multiple layers.
# The difference is in how they store data: a RasterStack is a list of several files
# or layers ("bands") of a single file that share the same extent and resolution.
# A RasterBrick is always one multi-layer object, so computations on it can be faster.
# RasterStack
s <- stack(r, r*r)
s
plot(s)
# RasterBrick
b <- brick(s)
b
plot(b)
#=============================
# 3.3. Reading raster data
#=============================
# As an example, let's look at elevation data from the ASTER GDEM 2011 project
# Read raster data
elev <- raster("data/ASTGTM2_N55E037_dem.tif")
elev
# What territory is this? Let's look at the picture
plot(elev)
#==============================================================
# 3.4. Visualizing raster images with `rasterVis`
#==============================================================
# Let's build a simple elevation map
# 3.4.1 Data preparation
# First, remove everything unnecessary by clipping the raster to the city limits
# Read the boundary data
moscow <- shapefile("data/mos_ao/ao.shp")
plot(moscow)
moscow@data
# Clip the raster to the city boundary
elev %>%
crop(moscow) %>% # returns raster clipped by Spatial* object’s extent
mask(moscow) -> elev_cropped # returns raster, clipped by Spatial* object’s contour
elev_cropped
plot(elev_cropped)
# 3.4.2 Intervals
# What elevation range did we get? Let's look at the histogram
ggplot()+
geom_histogram(aes(values(elev_cropped)))
summary(values(elev_cropped))
# There is a small number of outliers, let's ignore them
# 3.4.3 Levelplot
library(rasterVis)
levelplot(elev_cropped,
at=seq(75, 300, 25),
col.regions=colorRampPalette(rev(brewer.pal(10, 'RdYlGn'))),
margin=FALSE, colorkey=list(space='bottom',
labels=list(at=seq(75, 300, 25), font=4),
axis.line=list(col='black'),
width=0.75),
par.settings=list(strip.border=list(col='transparent'),
strip.background=list(col='transparent'),
axis.line=list(col='transparent')),
xlab = NULL,
ylab = NULL,
scales=list(draw=FALSE),
alpha.regions = 1)
# ================
# 3.5. What if...?
# ================
# 3.5.1 What if I want to extract statistics from the raster for individual polygons without cropping the raster each time?
extract(elev_cropped, # RasterLayer
moscow, # SpatialPolygon
fun = mean, # function
# sp = T, # store the result in the Spatial* object's attribute table
df = T) # return a data frame
# What if the raster is so heavy that my hardware cannot cope?
# Then use the velox package. It uses different computation algorithms.
# The downside is that velox only works with objects of its own classes
library(velox)
# create a velox object from our raster
my_velox <- velox(elev_cropped)
# the available functions are stored in the properties of the object itself.
# Call them via $
my_velox$extract(sp = moscow,
fun = mean,
df = T)
# 3.5.2 What if I want to add vector objects to the levelplot?
levelplot(elev_cropped,
at=seq(75, 300, 25),
col.regions=colorRampPalette(rev(brewer.pal(10, 'RdYlGn'))),
margin=FALSE, colorkey=list(space='bottom',
labels=list(at=seq(75, 300, 25), font=4),
axis.line=list(col='black'),
width=0.75),
par.settings=list(strip.border=list(col='transparent'),
strip.background=list(col='transparent'),
axis.line=list(col='transparent')),
xlab = NULL,
ylab = NULL,
scales=list(draw=FALSE),
alpha.regions = 1)+
layer(sp.lines(moscow, col = "grey32", lwd = 1.5))
# 3.5.3 What if I want to save my raster?
writeRaster(elev_cropped, filename = "data/Moscow_elevation.tif")
|
1669453f99456d402575ab25caa563dee768bad3 | 3ee80e0313c250966700ea1fbac120bb954c5e72 | /man/absolute.risk.Rd | 14b5699721c5b3def555d4fc787d9f742a79739d | [] | no_license | cran/BCRA | 2ba8fd5fac3435de00978a4b5aa431f1cede46b1 | 05bfdea1a18c78f411afa9c8c0fbcdf0ccae7499 | refs/heads/master | 2021-01-22T05:43:12.644706 | 2020-07-10T14:40:06 | 2020-07-10T14:40:06 | 34,805,723 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,833 | rd | absolute.risk.Rd | \name{absolute.risk}
\alias{absolute.risk}
\title{
Estimate absolute risks
}
\description{
A function to estimate absolute risks of developing breast cancer
}
\usage{
absolute.risk(data, Raw_Ind=1, Avg_White=0)
}
\arguments{
\item{data}{
A data set containing all the required input data needed to perform risk projections,
such as initial age, projection age, BrCa relative risk covariates and race. See \code{exampledata} for details.
}
\item{Raw_Ind}{
The raw file indicator with default value 1.
\code{Raw_Ind}=1 means RR covariates are in raw/original format.
\code{Raw_Ind}=0 means RR covariates have already been re-coded to 0, 1, 2 or 3.
}
\item{Avg_White}{Calculation indicator.
\code{Avg_White}=0, calculate absolute risks;
\code{Avg_White}=1, calculate average absolute risks based on the rates for average non-hispanic white women and average other (native american) women.
The default value is 0.
}
}
\details{
For the projection of absolute risks, this function is defined based on Gail Model.
Parameters and constants needed in this function include initial and projection age,
recoded covariates from function \code{recode.check}, relative risks of BrCa at age
"<50" and ">=50" from function \code{relative.risk} as well as other known constants
like BrCa composite incidences, competing hazards, and 1-attributable risk used in the NCI
BrCa Risk Assessment Tool (NCI BCRAT).
}
\value{
A vector which returns absolute risk values when Avg_White=0 or average absolute risk values
when Avg_White=1.
}
\seealso{
\code{\link{recode.check}}, \code{\link{relative.risk}}
}
\examples{
data(exampledata)
# calculate absolute risk
absolute.risk(exampledata)
# calculate average absolute risk
Avg_White <- 1
absolute.risk(exampledata, Raw_Ind=1, Avg_White)
} |
643642dd61795821cdc8b5cf2f5f0adb76835af2 | 7a728c699477aa5dfbfe6322619325441c595265 | /surface_metrics/geodiv/man/ssc.Rd | 6b716bad514b32d66378f423ab9c12870ff549f9 | [
"MIT"
] | permissive | bioXgeo/SyntheticLandscape | 0bdcba730dbfb3c60a94e67d6111909e71cdd632 | dd0661e03513d578d32b281eb99f68b0c0c2a001 | refs/heads/master | 2021-01-25T00:23:47.429011 | 2019-07-31T19:25:27 | 2019-07-31T19:25:27 | 123,295,277 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 551 | rd | ssc.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/localsurface.R
\name{ssc}
\alias{ssc}
\title{Mean Summit Curvature}
\usage{
ssc(x)
}
\arguments{
\item{x}{A raster object.}
}
\value{
A numeric value representing the average curvature of
surface peaks.
}
\description{
Calculates the mean summit curvature of a raster. Mean summit
curvature is the average principal curvature of local maxima
on the surface.
}
\examples{
# import raster image
data(normforest)
# calculate mean summit curvature
Ssc <- ssc(normforest)
}
|
01868c9eb1bac241d571019c58f1b0bb095e6370 | f9b8b7ab6bc852eeacda48a73568f8bc10d18439 | /Relation_nu_C.R | 2d5a30b0b4d914ac193755b055e0d737b06132ab | [] | no_license | chor-nyan/anomaly-detectionR | edfc9f78013fbdace4654819a2e488d4ccf26406 | 3481d7c6f5460c2103ea48cac9af16a84b4a6d7c | refs/heads/master | 2020-05-17T21:38:10.505493 | 2019-05-08T23:09:43 | 2019-05-08T23:09:43 | 183,978,426 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 779 | r | Relation_nu_C.R | library(kernlab)
# Empirical look at the relationship between the nu parameter of nu-SVC and
# the C parameter of C-SVC (kernlab's ksvm). kernlab is loaded at the top of
# this script; mlbench supplies the toy data, e1071 the reference svm().
library(mlbench)
library(e1071)
# dat <- mlbench.spirals(400, cycles = 1.2, sd = 0.07)
# x <- dat$x
# y <- dat$classes
# rbfsvm <- ksvm(x, y, type = "C-svc", kernel = "rbfdot")
# par(ps = 16, lwd = 2)
# plot(x, col = predict(rbfsvm, x))
#
# testx <- matrix(rnorm(100),,2)
# predict(rbfsvm, testx)
# Two-class 2-D Gaussian toy data (no set.seed, so results vary per run)
dat <- mlbench.2dnormals(200, cl = 2, sd = 1)
plot(dat)
x <- dat$x
y <- dat$classes
# y <- rep(1, 200)
# NOTE(review): d.svm (e1071) is fitted but never used afterwards.
d.svm <- svm(x, y)
# Fit a nu-SVC, then derive a candidate C value from it
# NOTE(review): deriving C from the intercept slot b looks suspect — the
# reverse direction below uses the sum of alphas instead; verify intent.
nusvm <- ksvm(x, y, type = "nu-svc", kernel = "rbfdot")
c <- 1/(nusvm@b * 200)
# NOTE(review): this Csvm fit is immediately overwritten by the C = 10 fit.
Csvm <- ksvm(x, y, type = "C-svc", kernel = "rbfdot", C = c)
# Reverse direction: fit C-SVC with C = 10, recover the equivalent nu
C <- 10
Csvm <- ksvm(x, y, type = "C-svc", kernel = "rbfdot", C = C)
alpha <- unlist(Csvm@alpha)
nu <- sum(alpha)/(C*200)
nusvm <- ksvm(x, y, type = "nu-svc", kernel = "rbfdot", nu = nu)
# Print both fitted models for comparison
Csvm
nusvm
|
c47cd733a7c5c2c1a7b2f4556d049c787ff5a84c | 03501e75ff173368902ebe7c972d70a8b27b3c0a | /R/UNdata.R | 29f56c30c761e4ac26f14c4b3b07c8fbc620bd67 | [] | no_license | ansikmahapatra/public-codes | bb7097d705e4ca7b1a79f4a4b0abbcf4e6465276 | 31c4fb46e41d3add5d88fc64883aea2f5d18b4c3 | refs/heads/master | 2022-12-12T15:36:12.876337 | 2020-09-07T20:39:10 | 2020-09-07T20:39:10 | 293,627,444 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,536 | r | UNdata.R | #United Nations Data// World Bank
# Pull 2017 electricity-related indicators for African countries from the
# World Bank WDI API and relate them to an Eco-Kraft country ranking.
#install.packages("WDI")
library(WDI)
# ISO-2 codes of the African countries to query
country<-c('DZ','AO','BJ',
'BW','BF','BI',
'CV','CM','CF','TD',
'KM','CG','CD','CI',
'DJ','EG','GQ','ER','ET',
'GA','GM','GH','GN',
'GW','KE','LS',
'LR','LY','MG',
'MW','ML','MR',
'MU','MA','MZ',
'NA','NE','NG',
'RW','ST','SN',
'SC','SL','SO',
'ZA','SS','SD',
'SZ','TZ','TG',
'TN','UG','ZM','ZW')
# World Bank indicator codes (electricity access/production, business climate)
# NOTE(review): 'IC.GE.NUM ' contains a trailing space — left untouched here
# (changing the literal changes behaviour), but verify the indicator code.
indicator<-c('EG.ELC.ACCS.ZS',
'EG.ELC.PROD.KH',
'EG.ELC.RNEW.KH',
'IC.GE.NUM ',
'EG.ELC.HOUS.ZS',
'IC.BUS.EASE.XQ')
# Human-readable column names (only used by the commented-out names<- below)
# NOTE(review): 8 headings for 3 id columns + 6 indicators (9 columns) —
# reconcile before re-enabling the rename.
headings<-c("iso",
"country",
"year",
"Access to Electricity",
"Electricity Production",
"Electricity production from renewable sources",
"Procedures required to connect to electricity",
"Household electrification rate")
# NOTE(review): if the WDI call fails, try() swallows the error and wdi_data
# stays undefined, so the write.csv below would then error.
try({wdi_data<-WDI(indicator=indicator, country=country, start=2017, end=2017)},silent=TRUE)
#names(wdi_data)<-headings
write.csv(wdi_data,"~/Desktop/wdi_data.csv")
# Regress the Eco-Kraft country rank on socio-economic covariates
ecokraft<-read.csv("~/OneDrive/Eco-Kraft/AAM_CR_CSV.csv")
summary(lm(ecokraft$Rank~ecokraft$Pop.Elec+ecokraft$Pop.Density+ecokraft$GDP.PC+ecokraft$Ann..GDP.Growth+ecokraft$Corruption+ecokraft$Unemployment+ecokraft$Inflation+ecokraft$Mobile+ecokraft$Internet+ecokraft$Elec.Consump+ecokraft$ClimateScope+ecokraft$HDI+ecokraft$Country.Risk))
degCent.un <- function(X) {
  # Group degree centralization of an undirected network.
  # See p. 180, eq. (5.5) in "Social Network Analysis: Methods and
  # Applications" by Wasserman & Faust.
  #
  # Args:
  #   X: square, symmetric (undirected) adjacency matrix.
  #
  # Returns:
  #   A single numeric value: the sum over nodes of (max degree - degree),
  #   normalized by (n - 1) * (n - 2). Only meaningful for n >= 3 nodes
  #   (the denominator is zero for n <= 2).
  #
  # Fix: the original signalled invalid input with print() + `break`;
  # `break` outside a loop is an error in R, so both guards failed with a
  # misleading message. They now stop() with the intended message.
  if (nrow(X) != ncol(X)) {
    stop("Matrix must be square")
  }
  if (!identical(X, t(X))) {
    stop("Matrix is not undirected")
  }
  deg <- rowSums(X)
  sum(max(deg) - deg) / ((nrow(X) - 1) * (nrow(X) - 2))
}
4354cc702f6bfcba569550b72e7a7d6b5a60d5e1 | 4cd99265b16a75448314aa0f8a3a849987c8a396 | /Plot2.R | 970dee19d7a5e3aec584b3e9e84a7f296741d3f7 | [] | no_license | fabmendozad/ExData_Plotting1 | dceb3dfb63dd5e09d142547cc536379115825095 | 962b42144776328598c1e01eb3a8a4ffb1f31366 | refs/heads/master | 2021-01-09T07:18:41.150581 | 2016-07-18T03:25:49 | 2016-07-18T03:25:49 | 63,565,902 | 0 | 0 | null | 2016-07-18T03:01:09 | 2016-07-18T03:01:08 | null | UTF-8 | R | false | false | 393 | r | Plot2.R | data <- read.table("data.txt", header=TRUE, sep=";", stringsAsFactors=FALSE)
# Subsetting `data` (read from data.txt at the top of this script) to the
# two target dates, 1-2 February 2007
data <- subset(data[data$Date %in% c("1/2/2007","2/2/2007"),])
# Combine the Date and Time columns into POSIXlt timestamps for the x axis
dates <- strptime(paste(data$Date, data$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
png("Plot2.png")
# Creating the plot: line chart of global active power over the two days
plot(dates, as.numeric(data$Global_active_power), type="l", xlab="", ylab="Global active power (kilowatts)")
dev.off()
|
c2c179275dce18885138173eec6c0ec281442855 | 14fde9767b387bba6c47a4b3246a2947c8014b82 | /tests/testthat/test-countEdges.R | f9fb20679de1ff2a0b99a2301aa3ff7743f25d52 | [
"MIT"
] | permissive | MuzheZeng/fastRTG | f99aa04a4d779f3e1407f05ee8f852e4a52e6f91 | 4591cd9003b26979e0ecd9d7ec4b662a47ed6af8 | refs/heads/master | 2021-05-23T13:42:43.775409 | 2020-05-10T22:35:54 | 2020-05-10T22:35:54 | 253,317,966 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 254 | r | test-countEdges.R | test_that("multiplication works", {
# NOTE(review): the test name "multiplication works" is the usethis template
# default and does not describe what is tested; consider renaming it.
# Fixed seed so the expected value below is reproducible
set.seed(123)
n = 10
k = 3
X = matrix(rnorm(n*k),n,k)
X = list(X,X,X)
# Random 3-way core tensor (requires the rTensor package)
G = rTensor::as.tensor(array(rexp(k^3),dim = rep(k,3)))
res = countEdges(X, G)
# Expected value pinned from a previous run — re-verify if countEdges changes
expect_equal(res[2], 0.08266169,tolerance = 1e-5)
})
|
fcdddcf0f1180782f762a53004bcc3733d408c76 | 60a99dc425d9edca7b3dec562f5cf6367d9c61ec | /prettyGraphs/man/prettyScree.Rd | e1ed9d65a828798977ab2c2c750db314a44c478c | [] | no_license | LukeMoraglia/ExPosition1 | e7718ae848608f1dc3934513c6588f53f2c45a7f | a69da6c5b0f14ef9fd031b98c3b40b34dad5240f | refs/heads/master | 2022-12-31T17:45:10.909002 | 2020-10-22T19:45:49 | 2020-10-22T19:45:49 | 255,486,130 | 0 | 1 | null | 2020-10-22T18:08:38 | 2020-04-14T02:01:12 | R | UTF-8 | R | false | false | 3,345 | rd | prettyScree.Rd | \name{prettyScree}
\alias{prettyScree}
\title{
prettyScree
}
\description{
prettyScree creates simple, crisp, publication-style scree plots and ``tests'' for SVD-based analyses.
}
\usage{
prettyScree(eigs, retain.col = "mediumorchid4", dismiss.col = "gray",
perc.exp = 1, n.comps = NULL, broken.stick = TRUE, kaiser = TRUE,
main = "")
}
\arguments{
\item{eigs}{a vector of \emph{positive} eigenvalues.}
\item{retain.col}{a color for components that are kept.}
\item{dismiss.col}{a color for components that are dismissed.}
\item{perc.exp}{a value between 0 and 1. Used to retain components that explain \code{perc.comp} * 100 variance. Note: perc.exp retains \code{cumsum(explained variance) < (perc.exp * 100)} + 1 component.}
\item{n.comps}{a value between 1 and \code{length(eigs)}. Used to retain \code{n.comps} number of components.}
\item{broken.stick}{a boolean. If TRUE (default), the broken-stick test is performed (see details).}
\item{kaiser}{a boolean. If TRUE (default), all components with eigenvalues greater than the \code{mean(eigs)} are retained.}
\item{main}{A title to be placed at the top of the graph.}
}
\details{
prettyScree visualizes the distribution of eigenvalues/explained variance from SVD-based analyses. Further, prettyScree performs several rudimentary ``tests''. Two rudimentary criteria are implemented: 1) user selected explained variance, and 2) user selected number of components. Additionally, two rudimentary ``tests'' are implemented: 1) the ``broken-stick'' distribution of variance model, and 2) the ``Kaiser criterion'' where all components that explain more variance than the mean are kept.
prettyScree colors components that ``pass'' all selected tests with \code{retain.col}. Any components that do not pass tests are colored by a more transparent version of \code{retain.col}. Components that do not meet any criteria for retention are colored by \code{dismiss.col}.
prettyScree should be considered ``under development'' as of 12.09.2013. The function works, but we will be adding more features in the near future.
}
\value{
\item{comps.tests}{a matrix with boolean values. Rows indicate which criteria are used, and columns correspond to components. If TRUE, a component is considered ``retained'' by that test/criteria. If FALSE, the component is ``dismissed'' according to that test/criteria.}
}
\references{
Cangelosi, R., & Goriely, A. (2007). Component retention in principal component analysis with application to cDNA microarray data. \emph{Biology direct}, \emph{2}(2), 1--21.\cr \cr
Peres-Neto, P. R., Jackson, D. A., & Somers, K. M. (2005). How many principal components? Stopping rules for determining the number of non-trivial axes revisited. \emph{Computational Statistics & Data Analysis}, \emph{49}(4), 974--997. \cr
}
\author{
Derek Beaton
}
\note{
A private function (\code{add.alpha}) was copied from http://lamages.blogspot.com/2013/04/how-to-change-alpha-value-of-colours-in.html
}
\examples{
prcomp.res <- prcomp(USArrests, scale = TRUE)
prettyScree(prcomp.res$sdev^2)
##or
princomp.res <- princomp(USArrests, cor = TRUE)
prettyScree(princomp.res$sdev^2)
}
\seealso{
Also see (some of) the other packages that perform a wide array of SVD-based analyses: \code{ExPosition}, \code{FactoMineR}, \code{ca}, \code{ade4}
}
\keyword{ multivariate }
|
07aeeda4abad9b86ebcc95a9b5b2dd56205166c1 | 92daf0af490f9fba31e08dcf485bfcd148dc0d8c | /R/weight.r | b81d8f76063bc8aadaddc766ee855cc2c39d3fb4 | [] | no_license | anu-bioinfo/gwSPIA | 30fdb16c400ce05ac0a69e3943a4548639582c55 | c4b1551d225870f791cb5afac258767ef015c95b | refs/heads/master | 2020-09-17T03:27:26.445895 | 2018-05-05T15:09:02 | 2018-05-05T15:09:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,587 | r | weight.r | #' return a list by BC
#' Betweenness-centrality (BC) weights for genes in KEGG pathways
#'
#' Parses every KGML file in \code{mdir}, computes node betweenness with
#' igraph and assigns each node's score to all genes contained in that node.
#'
#' @param mdir Path to the directory holding the KEGG KGML (.xml) files.
#' @param pathwaynames Character vector with the KGML file names in \code{mdir}.
#' @return A named list, one element per pathway (named by KEGG pathway ID);
#'   each element is a numeric vector of 1 + betweenness per gene, named by
#'   the Entrez gene ID (the "hsa:" prefix is stripped).
#' @export
BC<-function(mdir=NULL,pathwaynames=NULL){
# NOTE(review): library() inside a function attaches packages for the whole
# session; requireNamespace() + :: would be cleaner.
library(igraph)
library(KEGGgraph)
betweennesslist<-NULL
pathwayID<-NULL
for(i in 1:length(pathwaynames)){
# Parse the KGML file; try() keeps one malformed file from aborting the loop
mapkpathway<-try(parseKGML(paste(mdir,pathwaynames[[i]],sep="/")),TRUE)
mapkG<- KEGGpathway2Graph(mapkpathway, expandGenes=F)
g<-igraph.from.graphNEL(mapkG)
bet<-betweenness(g)
nodlist<-NULL
nodeslist<-NULL
nod<-nodes(mapkpathway)
# Spread each node's betweenness over all genes grouped in that node
for(j in 1:length(bet)){
nodname<-names(bet[j])
genename<-nod[[nodname]]@name
# NOTE(review): this inner loop recomputes the same rep() every pass;
# a single call would be equivalent.
for(jj in 1:length(genename)){
betweenness<-rep(bet[nodname],length(genename))
}
nodlist<-t(rbind(genename,betweenness))
nodeslist<-rbind(nodeslist,nodlist)
}
# Shift scores by +1 so that zero-betweenness genes still get weight 1
betness<-nodeslist[,2]
betness<-1+as.numeric(betness)
names(betness)<-nodeslist[,1]
# Strip the "hsa:" prefix to leave bare Entrez gene IDs
name<-names(betness)
name<-strsplit(as.character(name),"hsa:")
name<-do.call(rbind,name)
names(betness)<-name[,2]
betweennesslist<-c(betweennesslist,list(betness))
pathwayID<-c(pathwayID,mapkpathway@pathwayInfo@name)
}
names(betweennesslist)<-pathwayID
return(betweennesslist)
}
########################################################
#' Pathway-frequency (SP) weights for KEGG genes
#'
#' Counts, for every gene, in how many of the supplied KEGG pathways it
#' appears (nodes are gene-expanded, i.e. one node per gene).
#'
#' @param mdir Path to the directory holding the KEGG KGML (.xml) files.
#' @param pathwaynames Character vector with the KGML file names in \code{mdir}.
#' @return A named numeric vector: per-gene pathway counts, named by the
#'   Entrez gene ID (the "hsa:" prefix is stripped).
#' @export
SP<-function(mdir=NULL,pathwaynames=NULL){
library(igraph)
library(KEGGgraph)
nodeslist<-NULL
# NOTE(review): pathwayID is initialised but never used in this function.
pathwayID<-NULL
# Collect the (gene-expanded) node sets of all pathways
for(i in 1:length(pathwaynames)){
mapkpathway<-try(parseKGML(paste(mdir,pathwaynames[[i]],sep="/")),TRUE)
mapkG<- KEGGpathway2Graph(mapkpathway, expandGenes=T)
nodes<-nodes(mapkG)
nodeslist<-c(nodeslist,nodes)
}
# table() over the pooled node lists = number of pathways containing each gene
specific_number<-table(unlist(nodeslist))
wfg<-specific_number
wfg<-as.data.frame(wfg)
# Strip the "hsa:" prefix to leave bare Entrez gene IDs
nodenames<-wfg$Var1
nodenames<-strsplit(as.character(nodenames),"hsa:")
nodenames<-do.call(rbind,nodenames)
wfg<-wfg[,2]
names(wfg)<-nodenames[,2]
return(wfg)
}
##############################################################
#' Downstream differential-expression counts (IF) for KEGG genes
#'
#' Builds one merged directed graph from all pathways and, for every gene,
#' counts how many differentially expressed genes fall inside its 1-step
#' out-neighbourhood (the gene itself included). Genes that appear in the
#' pathways but not in the merged edge list get a count of 0.
#'
#' @param mdir Path to the directory holding the KEGG KGML (.xml) files.
#' @param pathwaynames Character vector with the KGML file names in \code{mdir}.
#' @param DE Named vector of differentially expressed genes; only the names
#'   (Entrez IDs without the "hsa:" prefix) are used.
#' @param ALL Vector of all measured Entrez gene IDs.
#'   NOTE(review): the derived variable `all` below is never used.
#' @return A named numeric vector of DE counts, named by Entrez gene ID
#'   (the "hsa:" prefix is stripped).
#' @export
IF<-function(mdir=NULL,pathwaynames=NULL,DE=NULL,ALL=NULL){
library(igraph)
library(KEGGgraph)
all<-paste('hsa:',ALL,sep="")
# Re-prefix the DE gene names so they match the KEGG node identifiers
DE<-names(DE)
DE<-paste('hsa:',DE,sep="")
edge<-NULL
nodeslist<-NULL
# Pool the edges and nodes of every pathway into one edge list
for(i in 1:length(pathwaynames)){
mapkpathway<-try(parseKGML(paste(mdir,pathwaynames[[i]],sep="/")),TRUE)
mapkG<-KEGGpathway2Graph(mapkpathway,expandGenes=TRUE)
node<-nodes(mapkG)
edL<-edgeData(mapkG)
# Edge names look like "from|to"; split them into a 2-column matrix
circsp<-strsplit(as.character(names(edL)),"\\|")
geneName<-do.call(rbind,circsp)
edge<-rbind(edge,geneName)
nodeslist<-c(nodeslist,node)
}
nodeslist<-nodeslist[!duplicated(nodeslist)]
# Build the merged directed graph from the de-duplicated edge list
e<-unique.matrix(edge)
g<-graph_from_edgelist(e)
mapk<-igraph.to.graphNEL(g)
nodes<-nodes(mapk)
# 1-step out-neighbourhood of every connected node (includes the node itself)
neighborhoods<-ego(g,1,nodes,"out")
inter<-function(X){
nde<-length(intersect(names(X),DE))
}
nDE<-lapply(neighborhoods,inter)
nDE<-as.numeric(as.matrix(nDE))
# Genes present in pathways but absent from the edge list get a count of 0
signodes<-setdiff(nodeslist,nodes)
nDEsn<-rep(0,length(signodes))
nDE<-c(nDE,nDEsn)
names(nDE)<-c(nodes,signodes)
# Strip the "hsa:" prefix to leave bare Entrez gene IDs
nodenames<-strsplit(as.character(c(nodes,signodes)),"hsa:")
nodenames<-do.call(rbind,nodenames)
names(nDE)<-nodenames[,2]
return(nDE)
}
##############################################################
#' Gene weights from impact (IF) and pathway-frequency (SP) scores
#'
#' Divides each gene's downstream DE count by its pathway-frequency weight,
#' then min-max rescales the ratios into the interval [1, 2].
#'
#' @param wfg Named numeric vector of per-gene pathway frequencies
#'   (output of \code{SP}).
#' @param nDE Named numeric vector of per-gene downstream DE counts
#'   (output of \code{IF}); must contain every name present in \code{wfg}.
#' @return Named numeric vector of weights in [1, 2], one entry per gene
#'   in \code{wfg}.
#' @export
wi<-function(wfg=NULL,nDE=NULL){
# Align nDE to the genes (and order) of wfg before taking the ratio
DEdegree<-nDE[names(wfg)]
wig<-DEdegree/wfg
# Best-effort export of the raw ratios for inspection. Previously this
# write.csv() aborted the whole run whenever ./GWSPIA did not exist.
if (dir.exists("./GWSPIA")) {
write.csv(wig,"./GWSPIA/w_8671.csv")
}
# Min-max rescale into [1, 2] (degenerate if all ratios are equal)
wig<-1+((wig-min(wig))/(max(wig)-min(wig)))
return(wig)
}
|
904b4880d55ab71c10fa98d6797b22e05149bac4 | 202c09d8917b23bcb2356f39a12b2d349ba47c81 | /run_Analysis.R | 16ccb4496327fad2c1c00d22f220248ce9198505 | [] | no_license | vannguyen17/gettingandcleaningdata | 3377d127dbc3eccc8f0c0767023ae9d9704ee73d | 8f5301cca5f59b4e6eb31f03fbf5f2ce29bd35e0 | refs/heads/master | 2016-08-08T23:27:05.834145 | 2016-03-14T06:07:42 | 2016-03-14T06:07:42 | 53,827,410 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,123 | r | run_Analysis.R | #Download the data into the data folder and unzip it
# Getting & Cleaning Data course project: download the UCI HAR dataset and
# build a tidy data set of per-subject / per-activity feature averages.
if(!file.exists("./data")) {dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = "./data/data.zip")
unzip(zipfile = "./data/data.zip",exdir = "./data")
#List files
filepath <- file.path("./data/UCI HAR Dataset")
files <- list.files(filepath, recursive=TRUE)
files
#Import training and test data
activitytrain <- read.table(file.path(filepath, "train", "Y_train.txt"))
activitytest <- read.table(file.path(filepath, "test" , "Y_test.txt" ))
subjecttrain <- read.table(file.path(filepath, "train", "subject_train.txt"))
subjecttest <- read.table(file.path(filepath, "test" , "subject_test.txt"))
featurestrain <- read.table(file.path(filepath, "train", "X_train.txt"))
featurestest <- read.table(file.path(filepath, "test" , "X_test.txt" ))
#Combine rows of training and test dataframes
activityfull <- rbind(activitytrain, activitytest)
subjectfull <- rbind(subjecttrain, subjecttest)
featuresfull <- rbind(featurestrain, featurestest)
#Set names to variables
names(activityfull) <- c("activity")
names(subjectfull) <- c("subject")
featuresnames <- read.table(file.path(filepath, "features.txt"))
names(featuresfull) <- featuresnames$V2
#Merge columns to get full dataframe
subjectactivity <- cbind(subjectfull, activityfull)
data <- cbind(featuresfull, subjectactivity)
#Subset names of features by mean or standard deviation
subfeaturesnames <- featuresnames$V2[grep("mean\\(\\)|std\\(\\)", featuresnames$V2)]
#Subset the full dataframe by selected names of features
selectednames <- c(as.character(subfeaturesnames), "subject", "activity" )
datameanstd <- subset(data, select = selectednames)
#Load activity names and replace in full dataframe
# NOTE(review): activitylabels is read but never used below — the id-to-name
# mapping is hard-coded in the gsub() calls instead.
activitylabels <- read.table(file.path(filepath, "activity_labels.txt"))
datameanstd$activity <- gsub("1", "WALKING", datameanstd$activity)
datameanstd$activity <- gsub("2", "WALKING_UPSTAIRS", datameanstd$activity)
datameanstd$activity <- gsub("3", "WALKING_DOWNSTAIRS", datameanstd$activity)
datameanstd$activity <- gsub("4", "SITTING", datameanstd$activity)
datameanstd$activity <- gsub("5", "STANDING", datameanstd$activity)
datameanstd$activity <- gsub("6", "LAYING", datameanstd$activity)
#Label the data set with descriptive variable names
names(datameanstd) <- gsub("^t", "time", names(datameanstd))
names(datameanstd) <- gsub("^f", "frequency", names(datameanstd))
names(datameanstd) <- gsub("Acc", "Accelerometer", names(datameanstd))
names(datameanstd) <- gsub("Gyro", "Gyroscope", names(datameanstd))
names(datameanstd) <- gsub("Mag", "Magnitude", names(datameanstd))
names(datameanstd) <- gsub("BodyBody", "Body", names(datameanstd))
#Create independent tidy data set with the average of each variable for each activity and each subject
tidydata <- aggregate(. ~subject + activity, datameanstd, mean)
tidydata <- tidydata[order(tidydata$subject, tidydata$activity), ]
write.table(tidydata, file = "tidydata.txt", row.name = FALSE)
105400827193a389bfd0d1dbf44ed86cb8ad6dc6 | ef8ac8957dec0620f59b169cde8bb13875a2b79e | /rankall.R | 8338633a2bd095e44bfce863af7867f3b7c6192b | [] | no_license | lulunaheed/rankhospital | 70e22337ea1c47e532e36120de6b27523f9fea9b | c2cd2c575fdde1328dcad30614fc87f1f937a3dd | refs/heads/master | 2020-06-15T10:15:29.648585 | 2019-07-04T16:12:37 | 2019-07-04T16:12:37 | 195,271,106 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,292 | r | rankall.R | rankall<- function (outcome,num="best"){
loc <- "/Users/lilafakharzadeh/Desktop/Coursera/rprog_data_ProgAssignment3-data"
setwd(loc)
## Read outcome data
outcomedata<-read.csv("outcome-of-care-measures.csv",na.strings="Not Available",stringsAsFactors=FALSE)
statesused<-unique(outcomedata[,7])
statenames<-statesused[order(statesused)]
numstates<-length(statenames)
hs<-character()
sts<-character()
for (j in 1:numstates){
state<-statenames[j]
sts[j]<-as.character(state)
vals<-(outcomedata[,7]==state)
newstate<-outcomedata[vals,7]
if ((sum(vals))==0){
stop(geterrmessage="invalid state")
}
if (outcome=="heart attack"){
newoutcome<-outcomedata[vals,11]
newhospital<-outcomedata[vals,2]
n<-order(state=newstate,outcome=newoutcome,hospital=newhospital)
newmat<-data.frame(state=newstate[n],outcome=newoutcome[n],hospital=newhospital[n])
good<-complete.cases(newmat)
newmat<-data.frame(state=newmat$state[good],outcome=newmat$outcome[good],hospital=newmat$hospital[good])
} else if (outcome=="heart failure"){
newoutcome<-outcomedata[,17][vals]
newhospital<-outcomedata[vals,2]
n<-order(state=newstate,outcome=newoutcome,hospital=newhospital)
newmat<-data.frame(state=newstate[n],outcome=newoutcome[n],hospital=newhospital[n])
good<-complete.cases(newmat)
newmat<-data.frame(state=newmat$state[good],outcome=newmat$outcome[good],hospital=newmat$hospital[good])
} else if (outcome=="pneumonia"){
newoutcome<-outcomedata[,23][vals]
newhospital<-outcomedata[vals,2]
n<-order(state=newstate,outcome=newoutcome,hospital=newhospital)
newmat<-data.frame(state=newstate[n],outcome=newoutcome[n],hospital=newhospital[n])
good<-complete.cases(newmat)
newmat<-data.frame(state=newmat$state[good],outcome=newmat$outcome[good],hospital=newmat$hospital[good])
} else {
stop(geterrmessage="invalid outcome")
}
if (num=="best"){
a<-1
hs[j]<-as.character(newmat$hospital[a])
}else if (num=="worst"){
a<-length(newmat$hospital[good])
hs[j]<-as.character(newmat$hospital[a])
}else {
hs[j]<-as.character(newmat$hospital[num])
}
}
final<-data.frame(HospitalName=hs,StateName=sts)
} |
e7403bfab4c9ac88f8cd9efcb5efca60e9b8c759 | 803ad45524e5009e304bae220096c1ad9d2bf67a | /postProcessingEvidenceUnify/R/evidenceUnify.R | 2880afcc34a154c4f0839fbef80d5d0106b263df | [
"Apache-2.0"
] | permissive | kingfish777/CommonEvidenceModel | 07429f633d896459183a16fbd8e37456b3061ac3 | 938a8ec215dd710a0edfb6ef061cd573f0593252 | refs/heads/master | 2020-04-26T06:45:48.421423 | 2018-09-18T18:18:15 | 2018-09-18T18:18:15 | 173,375,699 | 1 | 0 | null | 2019-03-01T22:21:20 | 2019-03-01T22:21:19 | null | UTF-8 | R | false | false | 1,506 | r | evidenceUnify.R | # Copyright 2017 Observational Health Data Sciences and Informatics
#
# This file is part of evidenceProcessingClean
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Pubmed Pull
#'
#' @param conn connection information
#'
#' @param targetDbSchema what schema do you want to write to
#'
#' @param targetTable what table do you want to store in
#'
#' @param sourceSchema where can evidence be located
#'
#' @export
evidenceUnify <- function(conn, targetDbSchema, targetTable, sourceSchema) {
  # Fully-qualified name of the destination table
  destination <- paste(targetDbSchema, targetTable, sep = ".")
  # Render and translate the packaged SQL for the connection's dialect
  renderedSql <- SqlRender::loadRenderTranslateSql(
    sqlFilename = "evidence_unify.sql",
    packageName = "postProcessingEvidenceUnify",
    dbms = attr(conn, "dbms"),
    oracleTempSchema = NULL,
    sourceSchema = sourceSchema,
    tableName = destination
  )
  # Execute the unification script against the database
  DatabaseConnector::executeSql(conn = conn, renderedSql)
}
|
96ea3552d6d9d13a24d5997ba057a96a37964092 | 091211fc733515cbcd42ad63998fcf6184bf3e77 | /man/hybrid.hourly.Rd | 0617870213a560adc2f18c4eee8350836383c90a | [] | no_license | AndrewYRoyal/ebase | 3560e2e4e717120357b066f27fbfa094d6bb34ec | 7decc805dc80d26a77505c8c4fb87816c63a7a24 | refs/heads/master | 2022-12-22T17:23:30.440452 | 2020-09-30T12:31:43 | 2020-09-30T12:31:43 | 168,870,979 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 241 | rd | hybrid.hourly.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hybrid.R
\name{hybrid.hourly}
\alias{hybrid.hourly}
\title{Hybrid Hourly}
\usage{
\method{hybrid}{hourly}(dat, model_options, ...)
}
\description{
Hybrid Hourly
}
|
f4b298c23e62ae1a0c2164c10498e0156aec6015 | 2d24c72abe89c38bc13682ca2a048ffa97dcf9c3 | /Blatt1_Aufgabe3.R | df1a33add17f328b7b27cda7cd18733f03da916b | [] | no_license | curala70/StatistikSS2017 | e42c93fe62255774727e0e44cad6ca97328fc008 | b535c398ee1ad6ec6a0434b759ace077712108ac | refs/heads/master | 2021-01-20T02:05:54.611177 | 2017-06-27T08:45:11 | 2017-06-27T08:45:11 | 89,371,542 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 358 | r | Blatt1_Aufgabe3.R | byte = function() {
# Konvertiere Zahl zwischen 0 und 255 in ein Byte
as.integer(intToBits(sample(0:255,1))[1:8])
}
# Wiederhole 10^4 mal die Summe des Byte
exp=replicate(10000,sum(byte()))
# Erstelle Histogramm
hist(replicate(10000,sum(byte())),freq=T,main='Binomialverteilung',
xlab='Wert',ylab='Relative Häufigkeit',breaks=0:9-0.5)
|
fb5e06e211c14fd25f072a279d5268d27790bf32 | 8c026eb8ce94d81cdfba0073fb2d8fc767cca4a1 | /McSwan/R/coalesce.R | 70780a93fe172fd186e9097fe63838ec4183b59c | [] | no_license | sunyatin/McSwan | 832018c82b3cecd354f3eb31af63e7de227162c2 | b86869b56892bbdf4b250b012c808dbeae5becf3 | refs/heads/master | 2023-02-08T14:10:05.278919 | 2023-02-03T13:57:23 | 2023-02-03T13:57:23 | 76,260,197 | 1 | 3 | null | null | null | null | UTF-8 | R | false | false | 11,845 | r | coalesce.R |
#' @title Extract the deme effective sizes just before an event time-scaled age
#' @param ms the ms-formatted demographic history
#' @param islandIndex the index of the focal island
#' @param eventAge the age of the event, scaled in units of 4No
#' @return the resizing coefficient, in reference to No
#' @keywords internal
get_size <- function(ms, islandIndex, eventAge) {
  # IMPORTANT NOTE: McSwan will NOT re-establish Ne conditioned by population
  # growth before (backward in time) the sweep
  tokens <- unlist(strsplit(ms, " "))
  # Collect (age, size) pairs from every resize switch applying to the focal
  # island no later than eventAge. Collection order mirrors the original scan
  # (-n, -N, -en, -eN) so ties on age resolve identically.
  ages <- numeric(0)
  sizes <- character(0)
  # -n <island> <k>: island-specific present-day resize (age 0)
  z <- which(tokens == "-n")
  z <- z[tokens[z + 1] == islandIndex]
  if (length(z) > 0) {
    ages <- c(ages, rep(0, length(z)))
    sizes <- c(sizes, tokens[z + 2])
  }
  # -N <k>: global present-day resize (age 0)
  z <- which(tokens == "-N")
  if (length(z) > 0) {
    ages <- c(ages, rep(0, length(z)))
    sizes <- c(sizes, tokens[z + 1])
  }
  # -en <age> <island> <k>: island-specific resize at a past epoch
  z <- which(tokens == "-en")
  z <- z[as.numeric(tokens[z + 1]) <= eventAge & as.numeric(tokens[z + 2]) == islandIndex]
  if (length(z) > 0) {
    ages <- c(ages, as.numeric(tokens[z + 1]))
    sizes <- c(sizes, tokens[z + 3])
  }
  # -eN <age> <k>: global resize at a past epoch
  z <- which(tokens == "-eN")
  z <- z[as.numeric(tokens[z + 1]) <= eventAge]
  if (length(z) > 0) {
    ages <- c(ages, as.numeric(tokens[z + 1]))
    sizes <- c(sizes, tokens[z + 2])
  }
  # No resize applies: the deme keeps the reference size No
  if (length(ages) == 0) return(1)
  # The most recent (largest) age wins; the first occurrence breaks ties
  as.numeric(sizes[which.max(ages)])
}
#' @title Extract all demographic events involving a given island, before an event time-scaled age
#' @description does not handle -es -eM -eG -eN -N -M -ema / this internal function is only called if method=="partition" or method=="nMigrants" at \code{coalesce()}
#' @param ms the ms-formatted demographic history
#' @param islandIndex the index of the focal island
#' @param eventAge the age of the event, scaled in units of 4No
#' @return a string of ms-formatted history events affecting the twinned/duplicated focal island
#' @keywords internal
get_events <- function(ms, islandIndex, eventAge) {
  # ___/!\___ this internal function is only called if method=="partition" or
  # method=="nMigrants" at \code{coalesce()}
  tokens <- unlist(strsplit(ms, " "))
  # The twin/duplicate of the focal island is appended after the last
  # original island, hence index nOriIsl + 1
  nOriIsl <- as.integer(tokens[which(tokens == "-I") + 1])
  twin <- nOriIsl + 1
  # Copy the switch found at each position, re-pointing the island argument
  # stored at offset 'slot' (within a switch spanning 'width' extra tokens)
  # to the twin deme index.
  retarget <- function(positions, width, slot) {
    vapply(positions, function(p) {
      d <- tokens[p:(p + width)]
      d[slot] <- twin
      paste(d, collapse = " ")
    }, character(1))
  }
  # Keep only switches whose age token is no later than eventAge
  age_ok <- function(positions) as.numeric(tokens[positions + 1]) <= eventAge
  # -n/-g <island> <value>: present-day island resize / growth
  ng <- which(tokens == "-n" | tokens == "-g")
  ng <- ng[tokens[ng + 1] == islandIndex]
  # -m <i> <j> <rate>: migration, focal island as first or second deme
  m <- which(tokens == "-m")
  m1 <- m[tokens[m + 1] == islandIndex]
  m2 <- m[tokens[m + 2] == islandIndex]
  # -en/-eg <age> <island> <value>: past resize / growth within eventAge
  eng <- which(tokens == "-en" | tokens == "-eg")
  eng <- eng[age_ok(eng) & tokens[eng + 2] == islandIndex]
  # -em <age> <i> <j> <rate>: past migration change, both directions
  em <- which(tokens == "-em")
  em1 <- em[age_ok(em) & tokens[em + 2] == islandIndex]
  em2 <- em[age_ok(em) & tokens[em + 3] == islandIndex]
  # Concatenation order matches the original: -n/-g, -m (first then second
  # position), -en/-eg, -em (first then second position)
  add <- c(retarget(ng, 2, 2),
           retarget(m1, 3, 2),
           retarget(m2, 3, 3),
           retarget(eng, 3, 3),
           retarget(em1, 4, 3),
           retarget(em2, 4, 4))
  paste(add, collapse = " ")
}
#' @title Coalescent simulations
#' @description Given a \code{referenceTable} object generated with \code{\link{generate_priors}}, this function performs coalescent simulations using Hudson's \emph{MS} simulator.
#' @param x an initialized \code{referenceTable} object
#' @param execute (logical) whether to execute the simulations
#' @param verbose (logical) verbose mode
#' @return A reference table with all simulated site frequency spectra stored in the \code{SFS} slot.
#' @details If you want to see the demographic command lines for each model before simulating, set \code{execute = FALSE} and \code{verbose = TRUE}.
#' @seealso \code{\link{generate_priors}}, \code{\link{dim_reduction}}
#' @references Hudson, R.R. (2002). Generating samples under a Wright-Fisher neutral model of genetic variation. Bioinformatics, 18, 337-338.
#' @examples Please refer to the vignette.
#' @export
coalesce <- function(x, execute = TRUE, verbose = FALSE) {
  # Runs Hudson's ms (through the phyclust package) once for the neutral model
  # and once per island/simulation for the selective (sweep) models, then
  # converts the simulated segregating sites into site frequency spectra via
  # an external python script and stores them in x$SFS.
  #
  # Side effects: creates/overwrites files under 'tempDir' and shells out to
  # python. NOTE(review): tempDir, pythonPath, pyPath and msPath are taken
  # from the enclosing environment (package-level variables); they are not
  # defined in this function.
  # method: partition || nMigrants || NULL (NULL is recommended, RT 14012017)
  # { internally set options
  method = "NULL" # NO SUBPARTITION (unlike Przeworski) and therefore also NO MIGRATION
  nRep = 1 # nRep (optional) number of repetitions to approximate the sweep (default 100)
  SAA = 1
  phyclust = TRUE
  ## }
  G <- x$GENERAL
  P <- x$PRIORS
  SFS <- list()
  nIsl <- length(G$islandSizes)
  msarr <- unlist(strsplit(G$msDemography, " "))
  # altering theta by incorporating windowSize specification
  # (assumes token 4 of the ms command line holds the -t theta value —
  # confirm against the msDemography format)
  msarr[4] <- sprintf('%f', as.numeric(msarr[4]) * G$windowSize)
  ##############################
  ##############################
  # NEUTRAL KINGMAN
  # Truncate/refresh the temp files before simulating
  if (execute) file.create(paste(tempDir,"/segsites.txt",sep=""), showWarnings=FALSE)
  if (execute) file.create(paste(tempDir,"/sfs",sep=""), showWarnings=FALSE)
  cat("\n> Neutral model\n")
  if (execute) {
    if (phyclust) {
      if (verbose) cat(paste(c(msarr[-c(1:2)],"\n"), collapse=" "))
      # nsam and nreps are the first two ms tokens; the rest are passed as opts
      phyclust::ms(nsam = as.integer(msarr[1]),
                   nreps = as.integer(G$nSimul),
                   opts = paste(msarr[-c(1:2)], collapse=" "),
                   temp.file = paste(tempDir,"/segsites.txt",sep=""),
                   tbs.matrix = NULL)
    } else {
      # Fallback: call a standalone ms binary (msPath) through the shell
      msarr[2] <- sprintf('%i', G$nSimul)
      cmd <- paste(msPath,paste(msarr,collapse=" "),">",tempDir,"/segsites.txt")
      system(cmd, intern = F)
    }
  }
  # Convert the segregating sites into an SFS with the external python script
  cmd <- paste(pythonPath," ",pyPath," -i ",tempDir,"/segsites.txt -o ",tempDir,"/sfs.txt", sep="")
  if (G$folded==TRUE) cmd <- paste(cmd,"--fold")
  if (execute) system(cmd, intern=F)
  if (execute) file.create(paste(tempDir,"/segsites.txt",sep=""), showWarnings=FALSE)
  # Neutral spectra are stored under the key "i0"
  if (execute) SFS[["i0"]] <- as.matrix(read.table(paste(tempDir,"/sfs.txt",sep=""), header=T, sep="\t"))
  ##############################
  ##############################
  # MULTICOALESCENT
  cat("\n> Selective models\n\n")
  # bottleneck intensity to mimick multicoalescence
  Ib <- (1/2) /(G$No)
  for ( i in seq_along(G$islandSizes) ) {
    if (execute) file.create(paste(tempDir,"/segsites.txt",sep=""), showWarnings=FALSE)
    if (execute) file.create(paste(tempDir,"/sfs",sep=""), showWarnings=FALSE)
    if (!verbose) cat(">>> Population index: ",i,"\n")
    for ( j in 1:G$nSimul ) {
      if (verbose) cat(paste(">>> Population Index: ",i," - Simulation: ",j,"\n"))
      if (!verbose) cat(j,"")
      msarrTmp <- msarr
      msarrTmp[2] <- sprintf('%i', nRep)
      # altering island specification
      I <- which(msarrTmp=="-I")
      islSize <- as.integer(msarrTmp[I+1+i])
      nOriIsl <- as.integer(msarrTmp[I+1])
      if (method=="partition") {
        # Split the focal island into sweeping / non-sweeping sub-islands
        # according to the recombination-rate prior
        nNonSweeping <- floor( islSize * P[[i]]$recRate[j] )
        nSweeping <- islSize - nNonSweeping
        msarrTmp[I+1+i] <- nSweeping
        msarrTmp[I+1+nIsl] <- paste(msarrTmp[I+1+nIsl], nNonSweeping, 0)
        msarrTmp[I+1] <- 1L + as.integer(msarrTmp[I+1])
      } else if (method=="nMigrants") {
        # Add an empty twin island; migration is added to cmdList below
        msarrTmp[I+1+i] <- islSize
        msarrTmp[I+1+nIsl] <- paste(msarrTmp[I+1+nIsl], 0, 0)
        msarrTmp[I+1] <- 1L + as.integer(msarrTmp[I+1])
      } else {
        # 14012016
        # NADA
      }
      if (method=="partition" || method=="nMigrants") {
        # sweep age (absolute count of generations)
        Ts <- P[[i]]$sweepAge[j]
        # demographic events for the new twin subisland
        twinMS <- get_events(G$ms, i, Ts/(4*G$No))
        # near k
        islK <- get_size(G$ms, i, Ts/(4*G$No))
        # opts list
        addMigr <- ifelse(method=="nMigrants", P[[i]]$recRate[j]*G$No*4, 0)
        cmdList <- paste(paste(msarrTmp[-c(1:2)],collapse=" "),
                         twinMS,
                         # bottleneck to mimick multicoalescence
                         "-en", Ts/(4*G$No), i, Ib,
                         # migration
                         "-m", i, nOriIsl+1, addMigr,
                         # merge sub-islands after multicoalescence
                         ###### TO IMPROVE TO AVOID CONFLICT OF ej AGES
                         "-en", (Ts+islSize-1)/(4*G$No), i, islK,
                         "-ej", (Ts+islSize)/(4*G$No), nOriIsl+1, i)
        cmdList <- gsub("\\s+", " ", cmdList)
      } else {
        # Recommended NULL method: no twin island, no migration
        # sweep age (absolute count of generations)
        Ts <- P[[i]]$sweepAge[j]
        # near k
        islK <- get_size(G$ms, i, Ts/(4*G$No))
        cmdList <- paste(paste(msarrTmp[-c(1:2)],collapse=" "),
                         # bottleneck to mimick multicoalescence
                         "-en", Ts/(4*G$No), i, Ib,
                         # merge sub-islands after multicoalescence
                         ###### TO IMPROVE TO AVOID CONFLICT OF ej AGES
                         "-en", (Ts+islSize-1)/(4*G$No), i, islK)
        cmdList <- gsub("\\s+", " ", cmdList)
      }
      if (execute) {
        if (phyclust) {
          if (verbose) cat(paste(cmdList,"\n"))
          phyclust::ms(nsam = as.integer(msarrTmp[1]),
                       nreps = nRep,
                       opts = cmdList,
                       temp.file = paste(tempDir,"/segsites.txt",sep=""),
                       tbs.matrix = NULL)
        } else {
          # ms command
          # NOTE(review): this non-phyclust branch is disabled; everything
          # after stop("bad") is unreachable dead code.
          stop("bad")
          cmd <- paste(msPath,paste(msarrTmp,collapse=" "),
                       twinMS,
                       # migration
                       "-m",i,nOriIsl+1,m,
                       # bottleneck to mimick multicoalescence
                       "-en",Ts/(4*G$No),i,Ib,
                       # merge sub-islands after multicoalescence
                       ###### TO IMPROVE TO AVOID CONFLICT OF ej AGES
                       "-en",(Ts+islSize-1)/(4*G$No),islK,
                       "-ej",(Ts+islSize)/(4*G$No),nOriIsl+1,i,
                       "> temp/segsites.txt")
          cmd <- gsub("\\s+", " ", cmd)
          if (verbose) cat(paste(cmd,"\n"))
          if (execute) system(cmd, intern=F)
        }
      }
      #cmdPy <- paste("python ",pyPath," -i ",tempDir,"/segsites.txt -o ",tempDir,"/sfs.txt -m ",nOriIsl+1," ",i, sep="")
      #if (execute) system(cmdPy)
      #if (execute) file.create(paste(tempDir,"/segsites.txt",sep=""), showWarnings=FALSE)
      #if (execute) SFS[[paste("i",i,sep="")]] <- rbind(SFS[[paste("i",i,sep="")]], apply(read.table(paste(tempDir,"/sfs.txt",sep=""), sep="\t", header=T), 2, mean))
    }
    # Convert all accumulated segsites for island i into spectra at once;
    # with the twin-island methods, the -m flag tells the script to merge
    # the twin deme back into the focal one.
    if (method=="partition" || method=="nMigrants") {
      cmdPy <- paste(pythonPath," ",pyPath," -i ",tempDir,"/segsites.txt -o ",tempDir,"/sfs.txt -m ",nOriIsl+1," ",i, sep="")
    } else {
      cmdPy <- paste0(pythonPath," ",pyPath," -i ",tempDir,"/segsites.txt -o ",tempDir,"/sfs.txt")
    }
    if (G$folded==TRUE) cmdPy <- paste(cmdPy,"--fold")
    if (execute) system(cmdPy)
    # Sweep spectra for island i are stored under the key "i<i>"
    if (execute) SFS[[paste("i",i,sep="")]] <- as.matrix(read.table(paste(tempDir,"/sfs.txt",sep=""), header=T, sep="\t"))
    if (!verbose) cat("\n\n")
  }
  # Attach the simulated spectra and record this call for reproducibility
  x$SFS <- SFS
  x$GENERAL$call.coalesce <- match.call()
  return(x)
}
|
227325b48670398239499734b97b27239e8a5f08 | 5c1a533945187081ebda39902468140d98f3e0a2 | /man/makeExampleDB.Rd | 8809ba23d8a72ea72215f121ccae7056335ce143 | [] | no_license | cran/arealDB | 40fb8b955aab850010ad74662cfad71a57828656 | 3c724a46811ba71288f311e11b20d1c8d065200d | refs/heads/master | 2023-07-05T11:01:30.807752 | 2023-07-03T09:00:02 | 2023-07-03T09:00:02 | 276,648,603 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,794 | rd | makeExampleDB.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeExampleDB.R
\name{makeExampleDB}
\alias{makeExampleDB}
\title{Build an example database}
\usage{
makeExampleDB(path = NULL, until = NULL, verbose = FALSE)
}
\arguments{
\item{path}{[\code{character(1)}]\cr The database gets created by default in
tempdir(), but if you want it in a particular location, specify that in
this argument.}
\item{until}{[\code{character(1)}]\cr The database building step in terms of
the function names until which the example database shall be built, one of
\code{"start_arealDB"}, \code{"regDataseries"}, \code{"regGeometry"},
\code{"regTable"}, \code{"normGeometry"} or \code{"normTable"}.}
\item{verbose}{[\code{logical(1)}]\cr be verbose about building the example
database (default \code{FALSE}).}
}
\value{
No return value, called for the side effect of creating an example
database at the specified \code{path}.
}
\description{
This function helps setting up an example database up until a certain step.
}
\details{
Setting up a database with an R-based tool can appear to be
cumbersome and too complex and thus intimidating. By creating an example
database, this functions allows interested users to learn step by step how
to build a database of areal data. Moreover, all functions in this package
contain verbose information and ask for information that would be missing
or lead to an inconsistent database, before a failure renders hours of work
useless.
}
\examples{
if(dev.interactive()){
# to build the full example database
makeExampleDB(path = paste0(tempdir(), "/newDB"))
# to make the example database until a certain step
makeExampleDB(path = paste0(tempdir(), "/newDB"), until = "regDataseries")
}
}
|
767c215977e6d137aa6c97faa5ddf7709059401e | 516e4fc6ff9b1840be8156d9458e203135fd1d56 | /R/fst.global.monoecious.R | 0e5df4e4bec1f63d447f3291aa6396f204c49d58 | [] | no_license | MarcoAndrello/MetaPopGen_0.0.8 | dfc30bb1a9ce3ab58beda0d9990d1e4ab62aa54e | 9d0d38fcbbc8417edf5f7220cdbf55c1f6b2aee7 | refs/heads/master | 2021-01-25T12:36:42.201135 | 2018-09-17T18:13:46 | 2018-09-17T18:13:46 | 123,484,225 | 0 | 0 | null | 2018-09-17T18:13:47 | 2018-03-01T19:45:05 | R | UTF-8 | R | false | false | 1,156 | r | fst.global.monoecious.R | # Global fst
fst.global.monoecious <- function(N,t) {
  # Global FST across all demes at time step t.
  #
  # N: either a 4-D genotype array (genotype x deme x . x time) or a character
  #    path to a directory of per-time-step "N<t>.RData" files, each of which
  #    loads an object named 'N'.
  # t: time step to analyse.
  #
  # Returns FST = (H_T - weighted mean H_S) / H_T, where the within-deme
  # heterozygosities are weighted by deme size.
  if (is.character(N)) {
    # (the original also built an unused 'dir.res.name' path here — removed)
    name.file <- paste(N, "/N", t, ".RData", sep = "")
    load(name.file)  # brings the genotype array 'N' into scope
  } else {
    N <- N[,,,t]
  }
  # Define basic variables
  m <- dim(N)[1]                    # number of genotypes
  l <- (sqrt(1 + 8 * m) - 1) / 2    # number of alleles (m = l(l+1)/2)
  n <- dim(N)[2]                    # number of demes
  # Genotype counts and number of individuals for each deme
  N_i <- apply(N, c(1, 2), sum)
  n_i <- apply(N, 2, sum)
  # Genotype counts and number of individuals for the total population
  N_T <- apply(N, 1, sum)
  n_T <- sum(N)
  # Allele frequencies in each deme and in the total population
  p_i <- array(NA, dim = c(n, l))
  for (i in seq_len(n)) {
    p_i[i, ] <- freq.all(N_i[, i])
  }
  p_T <- freq.all(N_T)
  # Expected heterozygosities, vectorised with rowSums instead of the
  # original per-deme loop
  H_S_i <- 1 - rowSums(p_i^2)
  H_t <- 1 - sum(p_T^2)
  # FST as the proportional reduction in heterozygosity
  fst <- (H_t - sum(n_i * H_S_i, na.rm = TRUE) / n_T) / H_t
  return(fst)
}
ffb2abf15a7bd48efc962b5f8e8e55703483f492 | 331cc0bf5eadeb319ae2c1bc9acd5280b2be4279 | /Lab_3/kouvaris_workspace.R | 08c5f508b9d8ac2cbb91f7c5b256103c85590fc3 | [] | no_license | timbo112711/Data-Mining | 221e9d207cfdb034eaf232b24983995e825ef10e | 4f5fed920b023a9bc54d2a5778325614fb444a78 | refs/heads/master | 2020-04-08T20:12:33.739073 | 2018-11-29T15:40:20 | 2018-11-29T15:40:20 | 159,688,790 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,300 | r | kouvaris_workspace.R | library(arules)
library(arulesViz)
library(dplyr)
library(readr)
library(dplyr)
library(tidyr)
library(arules)
library(arulesViz)
library(methods)
library(tseries)
library(ggplot2)
setwd("~/Dropbox/SMU/7331_mining/pdti_DataMining/Lab_3/")
# import python pre-processed for other mining
df = read.csv('./data_cleaned.csv')
# Baseline association rules: pairs or larger, support >= 5%, confidence >= 80%
rules <- apriori(df, parameter = list(minlen=2,supp=0.05, conf=0.8))
rules.sorted <- sort(rules, by="lift")
plot(rules.sorted, method="grouped")
# Quick inspection of the data and the mined rules
head(df)
summary(rules)
items(rules)
inspect(rules)
df_rules <- as(rules,"data.frame")
# NOTE(review): rules1 uses the same parameters as 'rules' above
# (supp=0.05, conf=0.8), so this recomputes an identical rule set.
rules1 <- apriori(df, parameter = list(minlen=2,supp=0.05, conf=0.8))
df_rules1 <- as(rules1,"data.frame")
plot(rules1)
rules1.sorted <- sort(rules1, by="lift")
plot(rules1.sorted, method="grouped")
# Higher support threshold (15%)
rules2 <- apriori(df, parameter = list(minlen=2,supp=0.15, conf=0.8))
df_rules2 <- as(rules2,"data.frame")
plot(rules2)
# Lower confidence threshold (50%)
rules3 <- apriori(df, parameter = list(minlen=2,supp=0.05, conf=0.5))
df_rules3 <- as(rules3,"data.frame")
plot(rules3)
# NOTE(review): rules4 duplicates the parameters of rules2 (supp=0.15,
# conf=0.8), so this too recomputes an identical rule set.
rules4 <- apriori(df, parameter = list(minlen=2,supp=0.15, conf=0.8))
df_rules4 <- as(rules4,"data.frame")
plot(rules4)
# market basket data set
# raw data
orig <- read.csv('./online_retail.csv')
# create baskets dataframe: one row per invoice with its list of stock codes
df_baskets <- orig %>%
  group_by(InvoiceNo,InvoiceDate) %>%
  summarise(basket = as.vector(list(StockCode)))
# compute transactions for the arules machinery
transactions <- as(df_baskets$basket, "transactions")
# (removed a bare 'plot.new' statement, which only printed the function body)
# Distribution of basket sizes
hist(size(transactions), breaks = 0:150, xaxt="n", ylim=c(0,250000),
     main = "Number of Items per basket", xlab = "#Items")
axis(1, at=seq(0,160,by=10), cex.axis=0.8)
mtext(paste("Total:", length(transactions), "baskets,", sum(size(transactions)), "items"))
# Association rules on the retail baskets
rules_baskets <- apriori(transactions, parameter = list(minlen=2,supp=0.05, conf=0.8))
# Fixed copy-paste bug: the original sorted the earlier 'rules' object
# (from data_cleaned.csv) instead of the basket rules computed here.
rules_baskets.sorted <- sort(rules_baskets, by="lift")
inspect(rules_baskets)
df_rules_baskets <- as(rules_baskets,"data.frame")
##ARIMA
# Revenue per line item (unit price x quantity)
orig$daily_rev <- orig$UnitPrice * orig$Quantity
# Total revenue per calendar day
aggdata <-aggregate(orig$daily_rev, by=list(as.Date(orig$InvoiceDate)),
                    FUN=sum, na.rm=TRUE)
# Quick look at the aggregated daily series. The original called
# ggplot2(orig$daily_rev): ggplot2 is a package name, not a function,
# so that line always errored.
ggplot(aggdata, aes(x = Group.1, y = x)) + geom_line()
# Exploratory ARIMA fit on the per-item revenue series
arima(orig$daily_rev)
# Daily checkout plot — fixed a stray ')' inside aes() that made the whole
# file unparseable. NOTE(review): 'df' must contain day_of_year and
# quantity_groups columns — confirm against data_cleaned.csv.
ggplot(df, aes(df$day_of_year, df$quantity_groups)) + geom_line() +
  scale_x_date('month') + ylab("Daily Bike Checkouts") +
  xlab("")
|
a97f87b0f8f39b68390a1ef0da78e767c0a1d961 | 3f80b5455566002c67aaf18bd2178a200b1f1352 | /clipDomainPolygon_v2.R | f9ca2b15d010da999d06587318e3b653392ae318 | [] | no_license | peterbat1/RandR_modelReview_and_prep | 7f70b30ed6e28df75c01ae5d18fee245d3528ad6 | 250abe643d987208ecb3689f5f347c7287a1c9a5 | refs/heads/master | 2023-03-07T10:54:33.948549 | 2023-02-27T03:17:07 | 2023-02-27T03:17:07 | 266,959,584 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,043 | r | clipDomainPolygon_v2.R |
# library(sp)
# library(rgdal)
# library(rgeos)
#
# clipBounds <- function(boundsFile)
# {
# d <- readOGR(boundsFile)
#
# clippedDomainName <- sub(".geojson","_clipped.geojson",boundsFile)
#
# clippedDomain <- SpatialPolygonsDataFrame(gIntersection(oz,d), data = data.frame(ID = "1"))
#
# writeOGR(clippedDomain, clippedDomainName, "GeoJSON", driver = "GeoJSON")
# }
#
# oz <- readOGR("/home/peterw/Data_and_Projects/RBG Projects/Restore and Renew/RandR_webtool_dev/RandR_modelReview_and_prep/www/resources/australia_polygon.shp", "australia_polygon")
#
# filePath <- "/home/peterw/Data_and_Projects/RBG Projects/Restore and Renew/RandR-webtool-maintenance/New species staging/domain"
#
# #theFiles <- list.files(filePath,"*.geojson", full.names = TRUE)
# # theFiles <- c("/home/peterw/Data_and_Projects/RBG Projects/Restore and Renew/RandR_webtool_dev/RandR_modelReview_and_prep/www/models/domain/Acacia _longifolia_domain.geojson",
# # "/home/peterw/Data_and_Projects/RBG Projects/Restore and Renew/RandR_webtool_dev/RandR_modelReview_and_prep/www/models/domain/Acacia_terminalisD_domain.geojson",
# # "/home/peterw/Data_and_Projects/RBG Projects/Restore and Renew/RandR_webtool_dev/RandR_modelReview_and_prep/www/models/domain/Acacia_terminalisDQK3_domain.geojson")
# thisFile <- "/home/peterw/Data_and_Projects/RBG Projects/Restore and Renew/RandR_webtool_dev/RandR_modelReview_and_prep/www/models/domain/Corymbia_eximia_domain.geojson"
# #for (thisFile in theFiles)
# {
# clipBounds(thisFile)
# }
#
library(sf)
library(ggplot2)
# Australia coastline polygon, reprojected to WGS84 (EPSG:4326)
ozPolygon <- sf::st_read("/home/peterw/Data_and_Projects/RBG Projects/Restore and Renew/RandR_webtool_dev/RandR_modelReview_and_prep/www/resources/australia_polygon.shp")
ozPolygon <- sf::st_transform(ozPolygon, 4326)
# Despite the .Rd extension, this is an R data file; it provides the object
# 'md', whose confidence_polygon is the raw species domain
load("/home/peterw/Data_and_Projects/RBG Projects/Restore and Renew/RandR_webtool_dev/RandR_modelReview_app3/www/models/gdm/Acacia_linifolia_genetic_model.Rd")
rawDomain <- sf::st_as_sf(md$confidence_polygon)
# rawDomain <- st_read("/home/peterw/Data_and_Projects/RBG Projects/Restore and Renew/RandR_webtool_dev/RandR_modelReview_and_prep/www/models/domain/Acacia_longifolia_domain.geojson")
#rawDomain <- sf::st_transform(rawDomain, 4326)
# st_crs<- only declares the CRS (no reprojection), unlike st_transform above;
# the raw polygon is assumed to already be in WGS84 coordinates
sf::st_crs(rawDomain) <- 4326
sf::st_write(rawDomain, dsn = "/home/peterw/Data_and_Projects/RBG Projects/Restore and Renew/RandR_webtool_dev/RandR_modelReview_app3/www/models/domain/Acacia_linifolia_domain_new.geojson", driver = "GeoJSON")
# Clip the domain to the Australian landmass and dissolve into one geometry
clippedDomain <- sf::st_union(sf::st_intersection(rawDomain, ozPolygon))
sf::st_write(clippedDomain, dsn = "/home/peterw/Data_and_Projects/RBG Projects/Restore and Renew/RandR_webtool_dev/RandR_modelReview_app3/www/models/domain/Acacia_linifolia_domain_new_clipped.geojson", driver = "GeoJSON")
# Visual check: coastline (grey), raw domain (blue), clipped domain (orange)
testPlot <- ggplot() +
  geom_sf(data = ozPolygon, colour = "light grey") +
  geom_sf(data = rawDomain, colour = "blue", fill = "blue", alpha = 0.2) +
  geom_sf(data = clippedDomain, colour = "orange", fill = "orange", alpha = 0.2)
plot(testPlot)
|
c762680fa81e05449ba57a774aa871c52d699dbc | 3f926458350729475fedbca1db5fd24bfb95cb52 | /maps.R | 51d6be49b16d71224f10fd74ecdfc0a2db958c5f | [] | no_license | fall2018-saltz/ist687_project_team_3 | 0b955c9910d88222557d881f566bb2885d1ca55e | 0033c30bacba96d81430437261bd5f1748148611 | refs/heads/master | 2020-03-31T18:22:47.072637 | 2018-12-06T05:50:31 | 2018-12-06T05:50:31 | 152,456,981 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,741 | r | maps.R |
library(ggplot2)
library(maps)
library(ggmap)
library(gdata)
library(dplyr)
# Working copy of the flight data, excluding Pacific territories (they have
# no polygon in the lower-48 "state" map used below)
df2 <- df
df2 <- df2[!((df2$Origin.State=="U.S. Pacific Trust Territories and Possessions")|(df2$Destination.State=="U.S. Pacific Trust Territories and Possessions")),]
# Lower-case state names to match map_data("state") region identifiers
origin_states_low <- tolower(unique(df2$Origin.State))
dest_states_low <- tolower(unique(df2$Destination.State))
# City lists for geocoding ("Orgin" is the column's spelling in the data —
# TODO confirm against the source CSV)
source_fl <- unique(df$Orgin.City)
dest_fl <- unique(df$Destination.City)
# Geocode via the Data Science Toolkit backend (network call)
source_co <- geocode(source= "dsk", source_fl)
dest_co <- geocode(source="dsk", dest_fl)
# US state polygons for choropleths
usa <- map_data("state")
# NOTE(review): data.frame() recycles; this errors unless the two state
# vectors have compatible lengths — verify before relying on df1
df1 <- data.frame(origin_states_low,dest_states_low,stringsAsFactors = FALSE)
# Mean departure delay by origin state, and mean arrival delay by destination
arr <- df2 %>% select(Origin.State,Departure.Delay.in.Minutes) %>% group_by(Origin.State) %>% summarise(total_delay=mean(Departure.Delay.in.Minutes,na.rm=TRUE))
arr1 <- df2 %>% select(Destination.State,Arrival.Delay.in.Minutes) %>% group_by(Destination.State) %>% summarise(total_delay_arri=mean(Arrival.Delay.in.Minutes,na.rm=TRUE))
arr$Origin.State <- tolower(arr$Origin.State)
arr1$Destination.State <- tolower(arr1$Destination.State)
# Join both delay summaries onto the state table
mer <- merge(df1,arr,by.x="origin_states_low",by.y="Origin.State",all=TRUE)
mer <- merge(mer,arr1,by.x="dest_states_low",by.y="Destination.State",all=TRUE)
# Choropleth of mean departure delay by origin state
# (aes(fill=mer$total_delay) uses $ inside aes(), an anti-pattern; bare
# column names are the conventional form)
link <- ggplot(mer,aes(map_id=origin_states_low)) + geom_map(map=usa,aes(fill=mer$total_delay)) + expand_limits(x=usa$long, y=usa$lat) + coord_map()
link
# Choropleth of mean arrival delay by destination state
df3 <- as.data.frame(dest_states_low,stringsAsFactors = FALSE)
newdf <- merge(df3,arr1,by.x="dest_states_low",by.y="Destination.State",all=TRUE)
map1 <- ggplot(newdf,aes(map_id=dest_states_low)) + geom_map(map=usa,aes(fill=newdf$total_delay_arri)) + expand_limits(x=usa$long, y=usa$lat) + coord_map()
map1
# Mean satisfaction by origin state, joined onto the same table and mapped
arr2 <- df %>% select(Satisfaction,Origin.State) %>% group_by(Origin.State) %>% summarise(avg_satisfaction=mean(Satisfaction,na.rm=TRUE))
arr2$Origin.State <- tolower(arr2$Origin.State)
mer <- merge(mer,arr2,by.x="origin_states_low",by.y="Origin.State",all=TRUE)
map3 <- ggplot(mer,aes(map_id=origin_states_low)) + geom_map(map=usa,aes(fill=mer$avg_satisfaction)) + expand_limits(x=usa$long, y=usa$lat) + coord_map()
map3
# Airport coordinates overlaid on a blank state map; the lon/lat filters
# crop to the continental US bounding box
airport <- data.frame(source_co,dest_co)
airport$origin_states_low <- "?"
airport <- airport[airport$lon>(-130),]
airport <- airport[airport$lat>20,]
map33 <- ggplot(mer, aes(map_id=origin_states_low)) + geom_map(map=usa) + expand_limits(x=usa$long, y=usa$lat) + coord_map()
map33
# Origin airports (red): columns lon/lat come from source_co
map44 <- map33 + geom_point(data=airport, aes(x=lon,y=lat),color="red")
map44
# Destination airports (blue): data.frame() suffixed the duplicate coordinate
# column names from dest_co as lon.1/lat.1
airport1 <- data.frame(source_co,dest_co)
airport1$origin_states_low <- "?"
str(airport1)
airport1 <- airport1[airport1$lon.1>(-130),]
airport1 <- airport1[airport1$lat.1>20,]
map55 <- map33 + geom_point(data=airport1, aes(x=lon.1,y=lat.1),color="blue")
map55
|
9e251bb27b6a3d9c82369b4c9493d719a4110fd6 | a177e3e6a34df54f6b72b18f07d4225234fed520 | /R_Conic_Quad.R | a464db18b296ba14df42a116e52c3ebaad16597f | [] | no_license | githubfun/pms | 81787bebd3c0d50c192b05a30fe788505b908af9 | b2de3cb85c37c29b40aab3196dcb551a7a3d2c89 | refs/heads/master | 2017-12-03T02:04:20.450005 | 2016-02-18T07:03:55 | 2016-02-18T07:03:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,242 | r | R_Conic_Quad.R | setwd("~/MyFile") #Dossier emplacement ? d?finir
PathCAC="returns1.csv"
PathDAX="returns DAX.csv"
SectorCAC="CAC40 sectors.csv"
SectorDAX="DAX30 sectors.csv"
## --- DAX portfolio optimization via MOSEK (rotated quadratic cone program) ---
## Builds and solves a mean-risk portfolio problem over a grid of log
## risk-aversion parameters lambda, subject to delta neutrality, a booksize
## limit and linear transaction costs. The decision vector is stacked as
## (xi, bi+, bi-, ti, zi, extra cone variables), n entries per group.
## NOTE(review): `PathDAX` must be defined before this point, and the
## `Matrix` and `Rmosek` packages must be loaded (for Matrix() and mosek()).
returns <- read.csv(file = PathDAX, header = TRUE, sep = ";") ## matrix of returns
n <- ncol(returns)
list_lambda <- seq(1, 5, 1) ## grid of log risk-aversion parameters
R <- matrix(0, ncol = length(list_lambda), nrow = n + 1)
M <- 20 * 10^6     ## booksize
bigbnd <- 10^6     ## large bound used to scale the position limits
L <- 1
c1 <- 0.01         ## transaction cost, buy side
c2 <- 0.01         ## transaction cost, sell side
l1 <- -0.05
u1 <- 0.05
b <- 0.01
eps <- 10^(-15)    ## jitter so the Cholesky factorization always succeeds
Q <- as.matrix(cov(returns))
RR <- chol(Q + diag(eps, n)) ## Cholesky factor used in the risk cone
## Variable lower bounds, one block per variable group.
## Fixed: the original used a Unicode minus sign (U+2212) in the first bound,
## which is a parse error in R.
bl_xi2 <- as.matrix(rep(-bigbnd/M, n))
bl_bi_p2 <- as.matrix(rep(0, n))
bl_bi_n2 <- as.matrix(rep(0, n))
bl_ti2 <- as.matrix(rep(0, n))
bl_zi <- as.matrix(rep(-Inf, n))
bl_add <- as.matrix(rbind(0.5, as.matrix(rep(0, n - 1))))
bl_x2 <- t(as.matrix(rbind(bl_xi2, bl_bi_p2, bl_bi_n2, bl_ti2, bl_zi, bl_add))) ## variables lower bounds
## Variable upper bounds.
bu_xi2 <- as.matrix(rep(bigbnd/M, n))
bu_bi_p2 <- as.matrix(rep(Inf, n))
bu_bi_n2 <- as.matrix(rep(Inf, n))
bu_ti2 <- as.matrix(rep(Inf, n))
bu_zi <- as.matrix(rep(Inf, n))
bu_add <- rbind(0.5, as.matrix(rep(Inf, n - 1)))
bu_x2 <- t(rbind(bu_xi2, bu_bi_p2, bu_bi_n2, bu_ti2, bu_zi, bu_add)) ## variables upper bounds
## Constraint rows, stacked into bl_c <= a2 %*% x <= bu_c.
Delta_neut2 <- t(rbind(as.matrix(rep(1, n)), as.matrix(rep(0, 5*n)))) ### delta neutrality: sum(xi) = 0
xi_bi_decomp2 <- cbind(diag(1, n), -diag(1, n), diag(1, n), diag(0, n), diag(0, n), diag(0, n)) ### xi = (bi+) - (bi-)
Booksize_constr2 <- t(rbind(as.matrix(rep(0, n)), as.matrix(rep(1, 2*n)), as.matrix(rep(0, 1*n)), as.matrix(rep(0, 1*n)), as.matrix(rep(0, 1*n)))) ### sum(|xi|) = sum((bi+) + (bi-)) < M
transac12 <- cbind(diag(c1, n), diag(0, n), diag(0, n), -diag(1, n), diag(0, n), diag(0, n))
transac22 <- cbind(-diag(c2, n), diag(0, n), diag(0, n), -diag(1, n), diag(0, n), diag(0, n))
cone_cst <- cbind(RR, diag(0, n), diag(0, n), diag(0, n), -diag(1, n), diag(0, n))
a2 <- rbind(Delta_neut2, xi_bi_decomp2, Booksize_constr2, transac12, transac22, cone_cst) ### constraints matrix: bl_c < a%*%x < bu_c
bl_c2 <- t(rbind(as.matrix(rep(0, n + 1)), 0, as.matrix(rep(-Inf, n)), as.matrix(rep(-Inf, n)), as.matrix(rep(0, n)))) ### constraints lower bounds
bu_c2 <- t(rbind(as.matrix(rep(0, n + 1)), M/bigbnd, as.matrix(rep(0, n)), as.matrix(rep(0, n)), as.matrix(rep(0, n)))) ### constraints upper bounds
R <- matrix(0, ncol = length(list_lambda), nrow = n + 1) # results matrix (row 1: return, row 2: risk)
# We re-write the matricial objective function as a scalar one,
# which is plugged into the conic problem below.
COR <- cov(returns) # our covariance matrix
H <- as.matrix(COR)
B <- as.matrix(cor(returns))
p <- n
E <- rbind(5*n + 1, 5*n + 2, as.matrix(seq(4*n + 1, 5*n, 1))) # indices of the cone members
cqo1 <- list()
cqo1$sense <- "min" ### we minimise the objective
cqo1$A <- Matrix(a2, byrow = TRUE, sparse = TRUE) ### constraints matrix
cqo1$bc <- rbind(blc = bl_c2, buc = bu_c2) ### constraint bounds
cqo1$bx <- rbind(blx = bl_x2, bux = bu_x2) ### variable bounds
NUMCONES <- 1
cqo1$cones <- matrix(list(), nrow = 2, ncol = NUMCONES)
rownames(cqo1$cones) <- c("type", "sub")
cqo1$cones[, 1] <- list("RQUAD", t(E)) ### rotated quadratic cone over the E variables
j <- 0
for (lambda in list_lambda) {
  ## Linear objective: minus expected return plus a lambda-weighted risk term.
  ## Fixed: the original used Unicode minus signs (U+2212) in this assignment
  ## ("f2<-" and "-returns"), which do not parse in R.
  f2 <- t(as.matrix(cbind(-returns[1, ], matrix(0, nrow = 1, ncol = 4*n), matrix(0, nrow = 1, ncol = 1),
                          matrix(10^(lambda), nrow = 1, ncol = 1), matrix(0, nrow = 1, ncol = n - 2)))) ### linear part of the problem
  cqo1$c <- f2
  r <- mosek(cqo1)
  j <- j + 1
  XXX <- r$sol$itr$xx[1:n]         # optimal portfolio weights
  R[1, j] <- -t(f2[1:n]) %*% XXX   # optimized expected return
  R[2, j] <- t(XXX) %*% Q %*% XXX  # optimized risk (variance)
  # fname<-paste("DAXTCC",j,".png",sep="")
  # xname=paste("Stock Weights",~lambda,"=",10^lambda)
  # png(fname, # create PNG for the heat map
  #     width = 5*300, # 5 x 300 pixels
  #     height = 5*300,
  #     res = 300, # 300 pixels per inch
  #     pointsize = 9) # smaller font size
  plot(sort(XXX, decreasing = TRUE), type = "h", xlab = expression(paste("Stock Weights")),
       ylab = "Weight Value", yaxp = c(-0.05, 0.05, 5), las = 1)
  # dev.off()
}
## Profiles of optimized return and risk across the risk-aversion grid.
plot(10^(list_lambda), R[1, ]*100, log = "x", xlab = expression(paste("risk aversion factor", ~lambda)),
     ylab = "optimized return", type = "p", las = 1, lwd = 2, lty = 3, col = "blue")
plot(10^(list_lambda), R[2, ]*10^4, log = "x", xlab = expression("risk aversion factor" ~lambda),
     ylab = "optimized risk", type = "p", las = 1, lwd = 2, lty = 3, col = "blue")
#mosek_write(qo1, "model_contopf.txt",opts=list())
f164216da2ba246dcc5efeb6bea02ae3f7803501 | 964e28a895ff80388bc0bff2a7b7687ade4824ff | /cluster_code.R | 088900d25d5ab429f96606c44aa06e8e45f571ff | [] | no_license | melnyashka/Hawkes_process_with_Erlang_kernels | 91d0284dbf344b7a18560289cdac980759d08933 | d2cd6e59525ebf1dbcbdb1b9a0d8e2b7f27a3b22 | refs/heads/master | 2020-06-20T06:54:11.159579 | 2020-01-31T20:38:01 | 2020-01-31T20:38:01 | 197,032,107 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,163 | r | cluster_code.R | library(parallel)
## --- Logging setup -----------------------------------------------------
## Creates a date-stamped subdirectory for logs and configures futile.logger
## to write to both the console and a log file.
library(futile.logger)
library(latex2exp)
## NOTE(review): a machine-specific relative path makes the script
## non-portable; consider parameterising the log root instead of setwd().
setwd("University/neuroscience/Hawkes_with_Erlang_kernels/logs") # comment/uncomment/change the directory if necessary
date <- Sys.Date() # Create a subdirectory with the current date
wd <- getwd() # Save the name of the working directory
# parameters of logging
path_to_logs <- file.path(wd,date)
dir.create(path_to_logs)
# NOTE(review): `file` shadows base::file for the rest of the script.
file <- paste(path_to_logs, "logfile", sep = "/")
# flog.appender(appender.file(file))
flog.appender(appender.tee(file)) # write both to console and to file
flog.threshold(DEBUG) # By default set threshold to INFO (because I can)
flog.debug("Debugging is on!")
# Setting up the parameters of the Hawkes simulation
nb_pop = 2 # number of populations
nb_neur = c(50, 50) # number of neurons in population, vector of integers of length n_pop
eta_vec = c(3,2) # c(3,2) # number of memory variables, vector of integers of length n_pop
nu_vec = c(1,1) # auxiliary constants
c_vec = c(-1,1) # rates of population
delta_gen = 1e-1 # presumably a discretisation step; not used in this chunk
## Intensity (jump-rate) function for population 1.
## Below x = log(20) the rate grows exponentially as 10*exp(x); from there on
## it saturates towards 400 via a logistic-type curve. Both branches evaluate
## to 200 at the switch point, so the function is continuous there.
## Expects a scalar `x` (the branch condition is not vectorized).
f1 <- function(x) {
  switch_point <- log(20)
  if (x < switch_point) {
    10 * exp(x)
  } else {
    400 / (1 + 400 * exp(-2 * x))
  }
}
## Intensity (jump-rate) function for population 2.
## Same shape as f1 scaled down by a factor of 10: exponential growth exp(x)
## below x = log(20), then logistic saturation towards 40. Both branches give
## 20 at the switch point, so the function is continuous there.
## Expects a scalar `x` (the branch condition is not vectorized).
f2 <- function(x) {
  switch_point <- log(20)
  if (x < switch_point) {
    exp(x)
  } else {
    40 / (1 + 400 * exp(-2 * x))
  }
}
## Per-population intensity functions (f1 and f2 defined above).
intensity_function = list(f1,f2)
### Making the cluster
## Four workers, one independent Hawkes simulation each; worker output
## (including futile.logger messages) is redirected to the log file.
cl <- makeCluster(4, outfile = file)
clusterCall(cl = cl, function() {
require("futile.logger")
require("expm")
})
## Ship the parameters and simulation helpers to the workers.
## NOTE(review): simul_Hawkes_Erlang_Anna, linear_ODE_matrix, time_evaluation,
## X_bound, X_bound_root, intensity_bound and intensity are defined elsewhere
## and must exist in the global environment before this point.
clusterExport(cl = cl, varlist = c("nb_pop", "nb_neur", "intensity_function", "c_vec", "nu_vec", "eta_vec", "simul_Hawkes_Erlang_Anna", "linear_ODE_matrix","time_evaluation", "X_bound", "X_bound_root", "intensity_bound", "intensity"))
HawkesProcess = parLapply(cl = cl, 1:4, function(k){
flog.debug("=====================================")
## NOTE(review): `neur` is neither defined nor exported anywhere visible
## here, so this log call will fail on the workers — was `k` (or nb_neur)
## intended?
flog.debug(paste("For neur=", neur, " and discrete bound"))
X_val <- c(-4.8896667, -5.5195462, -5.5856640, -4.9379179, 1.1969114, 0.7250284, 0.2692858) # to put ourselves in stationary regime
test = simul_Hawkes_Erlang_Anna(nb_pop, nb_neuron = rep(50, 2), intensity_function, c_vec, nu_vec, eta_vec+1, X_init = X_val, percent_small_ISI = 1e-3, stop_time=20, bound_method = "polyroot")
return(test$X)
})
stopCluster(cl)
3d99ffdf72b1c1f16e9286d35d8f4d136bfd97dc | cdf3d4455bfbba06427a143e052d73e4f38ec57c | /front_end/modules/analysis_mod.R | 4e309a6574b9cf1c98a9c45f530426aca6df5322 | [
"BSD-2-Clause",
"BSD-2-Clause-Views"
] | permissive | danlooo/DAnIEL | c194d695bb74577934fc68e1fa56da22c8470bf8 | 198fcd82546d3020af67020e5c021b1633718ba4 | refs/heads/main | 2023-09-06T08:39:28.620621 | 2021-11-11T15:49:59 | 2021-11-11T15:49:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 798 | r | analysis_mod.R | #!/usr/bin/env Rscript
# Copyright by Daniel Loos
#
# Research Group Systems Biology and Bioinformatics - Head: Assoc. Prof. Dr. Gianni Panagiotou
# https://www.leibniz-hki.de/en/systembiologie-und-bioinformatik.html
# Leibniz Institute for Natural Product Research and Infection Biology - Hans Knöll Institute (HKI)
# Adolf-Reichwein-Straße 23, 07745 Jena, Germany
#
# The project code is licensed under BSD 2-Clause.
# See the LICENSE file provided with the code for the full license.
#
# Interactive analysis module
#
#' UI for the interactive-analysis page: stacks the machine-learning,
#' correlation and statistics sub-module UIs in a single fluid page.
#'
#' @param id shiny module id
analysis_mod_UI <- function(id) {
## NOTE(review): `ns` is created but never used — the sub-module ids below
## are hard-coded rather than wrapped in ns(), so this module is not
## re-usable under a different namespace. Confirm whether ns("ml_mod") etc.
## was intended.
ns <- shiny::NS(id)
shiny::fluidPage(
width = NULL,
ml_mod_UI("ml_mod"),
correlation_mod_UI("correlation_mod"),
statistics_mod_UI("statistics_mod")
)
}
#' Server function of the analysis module. Currently an intentional no-op
#' placeholder: the sub-modules are wired up elsewhere.
#'
#' @param input,output,session standard shiny module server arguments
#' @param project project state (unused here)
analysis_mod <- function(input, output, session, project) {
}
|
54bf548fe53edc82874aff90010d5382c5902a7b | 154f590295a74e1ca8cdde49ecbb9cbb0992147e | /man/ar1.Rd | 5de08c3e5771edf205d5015bc9a177a7e9328d73 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer",
"CC0-1.0"
] | permissive | klingerf2/EflowStats | 2e57df72e154581de2df3d5de3ebd94c3da0dedf | 73891ea7da73a274227212a2ca829084149a2906 | refs/heads/master | 2017-12-07T10:47:25.943426 | 2016-12-28T20:52:42 | 2016-12-28T20:52:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 511 | rd | ar1.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ar1.R
\name{ar1}
\alias{ar1}
\title{Function to compute the AR(1) correlation coefficient for a given data series}
\usage{
ar1(data)
}
\arguments{
\item{data}{data frame containing daily discharge data}
}
\value{
ar1 AR(1) correlation coefficient
}
\description{
This function accepts a data frame containing daily streamflow data and returns the AR(1)
correlation coefficient
}
\examples{
qfiletempf<-sampleData
ar1(qfiletempf)
}
|
b88fdac52ddacb4bfd5f9831053cd3752c2f3457 | c1c49f8c8b21edc22b5fc001646ec003343aa7db | /PROBLEM3/RScripts/windforecast3_windpower3.R | 0b31d44e388ca0bd6f5c0099e828152d7ff03ef4 | [] | no_license | himajavadaga/Kaggle-3-datasets | 9db4f904f2bf9a18eaeb63e124a576f22fbd5f68 | 05776514114c2bfdafe89f0a8c02bbcd7d071ec2 | refs/heads/master | 2021-06-04T08:04:21.842214 | 2016-07-09T04:00:34 | 2016-07-09T04:00:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,882 | r | windforecast3_windpower3.R |
## ---- Weather forecasts: load and aggregate to hourly resolution ----
#helps the user to pick the file (interactive file chooser)
x=file.choose(new = FALSE)
#importing the file
wf3<-read.csv(x, header = TRUE)
## Forecasts come in blocks of 4 rows per hour; tag each block so the block
## mean can be taken below.
## NOTE(review): this assumes nrow is an exact multiple of 4, otherwise
## rep() fails or the blocks misalign.
wf3$agg_value <- rep(1:(length(wf3$date)/4), each=4)
aggdata <- aggregate(wf3, by=list(wf3$agg_value), FUN=mean)
#converting date to character
aggdata$date <- as.character(aggdata$date)
#separating the YYYYMMDD date part and the HH hour part of the timestamp
aggdata$hour <- substr(aggdata$date,9,10)
aggdata$date <- substr(aggdata$date,1,8)
#filling the hours from 0 to 23 (assumes complete, ordered days)
aggdata$hour <- rep(0:23, times=(length(aggdata$hour))/24)
#zero-padding single-digit hours ("0" -> "00", ..., "9" -> "09");
#sprintf("%02d", hour) would do the same in one call
aggdata$hour <- as.character(aggdata$hour)
aggdata$hour <- with(aggdata, ifelse(hour=="0", "00",ifelse(hour=="1", "01",ifelse(hour=="2", "02", ifelse(hour=="3", "03", ifelse(hour=="4", "04", ifelse(hour=="5", "05", ifelse(hour=="6", "06",ifelse(hour=="7", "07",ifelse(hour=="8", "08",ifelse(hour=="9", "09",hour)))))))))))
#concatenating the date and hour columns back into YYYYMMDDHH
aggdata$date <- do.call(paste, c(aggdata[c("date", "hour")], sep = ""))
#removing the unnecessary columns
aggdata$Group.1=NULL
aggdata$hors=NULL
aggdata$agg_value=NULL
aggdata$hour=NULL
## ---- Attach the observed wind power (wp3) and impute missing values ----
#windforecast file
#Choose the train.csv file (interactive file chooser)
x=file.choose(new = FALSE)
#read the file and store it in a dataframe named windforcast
windforcast<-read.csv(x, header = TRUE)
#keep only the date and wp3 columns (1 and 4) and left-join onto the hourly
#forecast table; hours without an observation get NA
windforcast_wp3 <- windforcast[, c(1,4)]
wf3_wp3 =merge(x = aggdata, y = windforcast_wp3, by = "date", all.x = TRUE)
summary(wf3_wp3)
####filling the NA's in all the columns#####
library(zoo)
#Using the zoo package: na.fill("extend") interpolates interior NAs and
#extends the first/last observed value to the edges.
#NOTE(review): format(round(...)) converts each column to *character*; they
#are converted back to numeric further below.
###u###
wf3_wp3$u <- zoo(wf3_wp3$u)
wf3_wp3$u=na.fill(wf3_wp3$u, "extend")
wf3_wp3$u= format(round(wf3_wp3$u, 3), nsmall = 3)
###v###
wf3_wp3$v <- zoo(wf3_wp3$v)
wf3_wp3$v=na.fill(wf3_wp3$v, "extend")
wf3_wp3$v= format(round(wf3_wp3$v, 3), nsmall = 3)
###ws###
wf3_wp3$ws <- zoo(wf3_wp3$ws)
wf3_wp3$ws=na.fill(wf3_wp3$ws, "extend")
wf3_wp3$ws= format(round(wf3_wp3$ws, 3), nsmall = 3)
###wd###
wf3_wp3$wd <- zoo(wf3_wp3$wd)
wf3_wp3$wd=na.fill(wf3_wp3$wd, "extend")
wf3_wp3$wd= format(round(wf3_wp3$wd, 3), nsmall = 3)
###wp3###
wf3_wp3$wp3 <- zoo(wf3_wp3$wp3)
wf3_wp3$wp3=na.fill(wf3_wp3$wp3, "extend")
wf3_wp3$wp3= format(round(wf3_wp3$wp3, 3), nsmall = 3)
#treat exact zeros in wp3 as missing and re-interpolate them
#NOTE(review): wp3 is character at this point (see format() above), so the
#== 0 comparison coerces 0 to "0" and will not match "0.000" — confirm this
#loop actually does what was intended.
for(i in 1:length(wf3_wp3$wp3))
{
if(wf3_wp3$wp3[i] == 0)
{
wf3_wp3$wp3[i]=NA
}
}
wf3_wp3$wp3 <- zoo(wf3_wp3$wp3)
wf3_wp3$wp3=na.fill(wf3_wp3$wp3, "extend")
#wf3_wp3$wp3= format(round(wf3_wp3$wp3, 3), nsmall = 3)
str(wf3_wp3$wp3)
summary(wf3_wp3)
### conversion into numeric format (the columns became character via format())
wf3_wp3$date <- as.integer(wf3_wp3$date)
wf3_wp3$u <- as.numeric(wf3_wp3$u)
wf3_wp3$v <- as.numeric(wf3_wp3$v)
wf3_wp3$ws <- as.numeric(wf3_wp3$ws)
wf3_wp3$wd <- as.numeric(wf3_wp3$wd)
wf3_wp3$wp3 <- as.numeric(wf3_wp3$wp3)
str(wf3_wp3)
summary(wf3_wp3)
#converting date to character so it can be sliced with substr()
wf3_wp3$date <- as.character(wf3_wp3$date)
#separating the year, month, day and hour from the YYYYMMDDHH timestamp
wf3_wp3$hour <- substr(wf3_wp3$date,9,10)
wf3_wp3$date <- substr(wf3_wp3$date,1,8)
wf3_wp3$date <- as.Date(wf3_wp3$date, format="%Y%m%d")
wf3_wp3$year <- format(wf3_wp3$date, format="%Y")
wf3_wp3$month <- format(wf3_wp3$date, format="%m")
wf3_wp3$day <- format(wf3_wp3$date, format="%d")
wf3_wp3$year <- as.numeric(wf3_wp3$year)
wf3_wp3$month <- as.numeric(wf3_wp3$month)
wf3_wp3$day <- as.numeric(wf3_wp3$day)
wf3_wp3$hour <- as.numeric(wf3_wp3$hour)
#removing the date column as the date has been split
wf3_wp3$date <- NULL
#Rearranging the columns: year, month, day, hour first, then u, v, ws, wd, wp3
wf3_wp3 = wf3_wp3[,c(6,7,8,9,1,2,3,4,5)]
#Removing 0 values: zeros in wp3 are treated as missing and re-interpolated
#(wp3 is numeric here, so the == 0 comparison is meaningful this time)
for(i in 1:length(wf3_wp3$wp3))
{
if(wf3_wp3$wp3[i] == 0)
{
wf3_wp3$wp3[i]=NA
}
}
wf3_wp3$wp3 <- zoo(wf3_wp3$wp3)
wf3_wp3$wp3=na.fill(wf3_wp3$wp3, "extend")
wf3_wp3$wp3= format(round(wf3_wp3$wp3, 3), nsmall = 3)
wf3_wp3$wp3 <- as.numeric(wf3_wp3$wp3)
summary(wf3_wp3)
#splitting into training (2009-2010) and test (2011-2012) data by year
wf3_wp3_training <- wf3_wp3[((wf3_wp3$year=="2009") | (wf3_wp3$year=="2010")),]
wf3_wp3_test <- wf3_wp3[((wf3_wp3$year=="2011") | (wf3_wp3$year=="2012")),]
#round wp3 to 3 decimals (format() makes it character; converted back below)
wf3_wp3_test$wp3= format(round(wf3_wp3_test$wp3, 3), nsmall = 3)
wf3_wp3_training$wp3= format(round(wf3_wp3_training$wp3, 3), nsmall = 3)
#Renumbering the rows for test data frame
rownames(wf3_wp3_test) <- NULL
#numeric conversion
wf3_wp3_test$wp3 <- as.numeric(wf3_wp3_test$wp3)
wf3_wp3_training$wp3 <- as.numeric(wf3_wp3_training$wp3)
wf3_wp3$wp3 <- as.numeric(wf3_wp3$wp3)
########################################################################################################
########################################BUILDING MODELS#################################################
########################################################################################################
##Building models for Windfarms 1 using the wf3_wp3_training datasets##
#####wf3_wp3_training#####
####REGRESSION###
summary(wf3_wp3)
summary(wf3_wp3_training)
summary(wf3_wp3_test)
#Start Regression
#install.packages('forecast')
str(wf3_wp3_training$wp3)
library(MASS)
library(ISLR)
set.seed(123)
#Fit a linear model on the training years; `day` is excluded as a predictor
lm.fit3= lm(wp3~.-day , data = wf3_wp3_training)
summary(lm.fit3)
library(forecast)
#out-of-sample predictions on the 2011-2012 test set
pred = predict(lm.fit3, wf3_wp3_test)
#Exporting regression outputs and performance metrics
a = accuracy(pred,wf3_wp3_test$wp3)
a
write.csv(a, "PerformanceMatrics_wp3.csv")
summary(wf3_wp3_training)
#NOTE(review): `benchmark` is never created in this script, so the next two
#lines will fail unless a benchmark data frame is loaded beforehand by some
#other script — confirm where it is supposed to come from.
benchmark$wp3 <- (predict(lm.fit3,wf3_wp3))
benchmark$wp3= format(round(benchmark$wp3, 3), nsmall = 3)
####REGRESSION TREES####
library (tree)
library (MASS)
library (ISLR)
set.seed (1)
#random half of the training rows used to grow the tree
train = sample (1:nrow(wf3_wp3_training), nrow(wf3_wp3_training)/2)
tree.wf = tree(wp3~.,wf3_wp3_training,subset=train)
summary (tree.wf)
plot (tree.wf)
text (tree.wf, pretty = 0)
#cross-validation to inspect tree size, then prune to 4 terminal nodes
cv.wf = cv.tree (tree.wf)
plot (cv.wf$size, cv.wf$dev, type='b')
prune.wf =prune.tree(tree.wf, best = 4)
plot(prune.wf)
text(prune.wf, pretty = 0)
#held-out MSE of the full (unpruned) tree
yhat=predict (tree.wf, newdata =wf3_wp3_training [-train,])
wf.test=wf3_wp3_training [-train,"wp3"]
plot(yhat,wf.test)
abline (0,1)
mean((yhat -wf.test)^2)
#held-out MSE of the pruned tree (yhat and wf.test are overwritten here)
yhat=predict (prune.wf, newdata =wf3_wp3_training [-train,])
wf.test=wf3_wp3_training [-train,"wp3"]
plot(yhat,wf.test)
abline (0,1)
mean((yhat -wf.test)^2)
#forecast::accuracy metrics for the pruned tree, exported to CSV
regtree=accuracy(yhat,wf.test)
regtree
write.csv(regtree,"PerformanceMetrics_Regression_Tree_wp3.csv",row.names = FALSE)
#### NEURAL NETWORKS ####
set.seed(500)
library(MASS)
# Train-test random splitting (75/25) over the full hourly table
index <- sample(1:nrow(wf3_wp3),round(0.75*nrow(wf3_wp3)))
#train <- wf3_wp3[index,]
#test <- wf3_wp3[-index,]
# Fitting linear model as the baseline for the NN comparison
lm.fit3 <- glm(wp3~.-day , data=wf3_wp3_training)
summary(lm.fit3)
# Predicted data from lm
pr.lm <- predict(lm.fit3,wf3_wp3_test)
# Test MSE
MSE.lm <- sum((pr.lm - wf3_wp3_test$wp3)^2)/nrow(wf3_wp3_test)
# Neural net fitting
# Scaling data for the NN: min-max normalisation of every column
maxs <- apply(wf3_wp3, 2, max)
mins <- apply(wf3_wp3, 2, min)
maxs
mins
scaled <- as.data.frame(scale(wf3_wp3, center = mins, scale = maxs - mins))
# Train-test split
train_ <- scaled[index,]
test_ <- scaled[-index,]
#View(train_)
# NN training: wp3 against all other columns, two hidden layers of 5 units
library(neuralnet)
n <- names(train_)
f <- as.formula(paste("wp3 ~", paste(n[!n %in% "wp3"], collapse = " + ")))
nn <- neuralnet(f,data=train_,hidden=c(5,5), threshold= 0.5, linear.output=F)
print(nn)
# Visual plot of the model
plot(nn)
summary(nn)
# Predict; the covariate columns (1:8) must match the NN's input layer
pr.nn <- compute(nn,test_[,1:8])
# Results from NN are normalized (scaled)
# Descaling for comparison
pr.nn_ <- pr.nn$net.result*(max(wf3_wp3$wp3)-min(wf3_wp3$wp3))+min(wf3_wp3$wp3)
test.r <- (test_$wp3)*(max(wf3_wp3$wp3)-min(wf3_wp3$wp3))+min(wf3_wp3$wp3)
# Calculating MSE
MSE.nn <- sum((test.r - pr.nn_)^2)/nrow(test_)
# Compare the two MSEs
print(paste(MSE.lm,MSE.nn))
#NOTE(review): the output filename below says "Regression_Tree_wp2" although
#the value written is the wp3 neural-network MSE — almost certainly a
#copy-paste slip; confirm the intended filename.
write.csv(MSE.nn,"PerformanceMetrics_Regression_Tree_wp2.csv",row.names = FALSE)
|
b6da2cf34d1e982e81c3d77bc77912d2fc1a71dd | dd3d2250d7511fa02f15e68e60ff12be374c3a9a | /ph.R | 5b1ddc0a046d4852c5c1c4edd7222bf9e1aedf99 | [] | no_license | mleibert/651 | feebff679ec0ffe93e41d3e50304dd23c08d7f55 | f4304d9fbbed0f7b9c330d2c6f8e447b037a45a2 | refs/heads/master | 2021-08-24T05:04:06.919720 | 2017-12-08T04:39:42 | 2017-12-08T04:40:06 | 102,660,779 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,210 | r | ph.R | rm(list=ls())
## Simple linear regression worked "by hand" on the plastic hardness data:
## ANOVA decomposition, overall F test, lack-of-fit test and Bonferroni
## joint confidence intervals for intercept and slope.
options(stringsAsFactors = FALSE)
options(scipen=999)
## NOTE(review): machine-specific working directory; non-portable.
setwd("G:/math/651")
ph<-read.table("plastichardness.txt")
names(ph)<-c("Y","X")
tail(ph)
ph.n<-nrow(ph)
lm(ph$Y~ph$X)
## Fitted line with the coefficients hard-coded from the lm() printout above.
## NOTE(review): these are rounded values; using coef(lm(Y ~ X, ph)) would
## avoid the rounding error that propagates into every quantity below.
ph.lm<-function(x) { 168.600 + 2.034 *x}
## ANOVA pieces: regression SS, error mean square, SSE, total SS.
ph.msr<-sum( ( ph.lm(ph$X) - mean(ph$Y))^2 )
ph.mse<-sum( ( ph$Y -ph.lm(ph$X) )^2 )/(ph.n-2)
ph.sse<-ph.mse*(ph.n-2)
ph.ssto<-ph.sse+ph.msr
## Overall F statistic for H0: slope = 0, against F(1, n-2).
ph.F<- ph.msr/ph.mse
ph.F
anova(lm(ph$Y~ph$X))
qf(1-.01,1,ph.n-2)
## NOTE(review): the denominator df here is ph.n, while the critical value
## above and the p-value below use ph.n-2 — probably a typo.
ph.F<qf(1-.01,1,ph.n)
1-pf(ph.F,1,ph.n-2)
## R^2 (as SSR/SSTO) and related quantities.
ph.sse/ph.ssto
ph.msr/ph.ssto
ph.msr-ph.sse
## Pure-error sum of squares per distinct X level (for the lack-of-fit test).
SPE <-rep(NA,length(unique(ph$X)))
for(i in 1:length(unique(ph$X)) ){
TY<-mean(ph[which( ph$X == unique(ph$X)[i] ),]$Y)
SPE[i]<-sum(((ph[which( ph$X == unique(ph$X)[i] ),]$Y)-TY)^2)
}
ph.c<-length(unique(ph$X))
## Lack-of-fit F test: (SSLF/df) / (SSPE/df) compared with F(c-2, n-c).
((ph.sse - sum(SPE) ) / ((ph.n-2)-(ph.n-ph.c)) ) / (sum(SPE) / (ph.n-ph.c))<
qf(1-.01,ph.c-2,ph.n-ph.c)
## Bonferroni joint 90% intervals for intercept and slope: B = t(1-0.10/4; n-2).
ph.B<-qt(1-(.1/4),ph.n-2)
lm(ph$Y~ph$X)
168.600 - sqrt(ph.mse*( (1/ph.n) + (mean(ph$X)^2) /
(sum((ph$X-mean(ph$X))^2)) ))*ph.B
168.600 +sqrt(ph.mse*( (1/ph.n) + (mean(ph$X)^2) /
(sum((ph$X-mean(ph$X))^2)) ))*ph.B
2.034 - sqrt(ph.mse / sum((ph$X-mean(ph$X))^2) ) * ph.B
2.034 + sqrt(ph.mse / sum((ph$X-mean(ph$X))^2) ) * ph.B
|
5cc8f4046030b5fa642ff3dad56b253ace14b18c | 98b87f6e7e180948960cacd5dab7080914c16984 | /man/checknames.Rd | 4b7f81218ddf3ec1954697658b0c9cae24b385bd | [] | no_license | alexpkeil1/qgcomp | ef1543e24d64ce3afcf5c6385904e3875d14ab5f | b09d7082bd1a3ea7508a0a2954d7351dfb2243f8 | refs/heads/main | 2023-08-17T21:12:52.820929 | 2023-08-10T12:27:31 | 2023-08-10T12:27:31 | 154,714,135 | 16 | 6 | null | null | null | null | UTF-8 | R | false | true | 779 | rd | checknames.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/base_utility.R
\name{checknames}
\alias{checknames}
\title{Check for valid model terms in a qgcomp fit}
\usage{
checknames(terms)
}
\arguments{
\item{terms}{model terms from attr(terms(modelfunction, data), "term.labels")}
}
\description{
This is an internal function called by \code{\link[qgcomp]{qgcomp}},
\code{\link[qgcomp]{qgcomp.glm.boot}}, and \code{\link[qgcomp]{qgcomp.glm.noboot}},
but is documented here for clarity. Generally, users will not need to call
this function directly. This function tries to determine whether there are
non-linear terms in the underlying model, which helps infer whether the
appropriate function is called, and whether more explicit function calls
are needed.
}
|
79f6dd8b4960dc8e47cc38eec5e0c17f302a58f6 | 150ddbd54cf97ddf83f614e956f9f7133e9778c0 | /man/subset.stimlist.Rd | 10c2ce645976a80bad3e50f1c20a022d54d10059 | [
"CC-BY-4.0"
] | permissive | debruine/webmorphR | 1119fd3bdca5be4049e8793075b409b7caa61aad | f46a9c8e1f1b5ecd89e8ca68bb6378f83f2e41cb | refs/heads/master | 2023-04-14T22:37:58.281172 | 2022-08-14T12:26:57 | 2022-08-14T12:26:57 | 357,819,230 | 6 | 4 | CC-BY-4.0 | 2023-02-23T04:56:01 | 2021-04-14T07:47:17 | R | UTF-8 | R | false | true | 626 | rd | subset.stimlist.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{subset.stimlist}
\alias{subset.stimlist}
\title{Subset Stimulus Lists}
\usage{
\method{subset}{stimlist}(x, subset, ...)
}
\arguments{
\item{x}{list of stimuli}
\item{subset}{a character string to use as a pattern for searching stimulus IDs, or a logical expression indicating elements or rows to keep: missing values are taken as false.}
\item{...}{further arguments to be passed to or from other methods.}
}
\value{
list of stimuli
}
\description{
Returns a subset of the stimulus list meeting the condition.
}
\keyword{internal}
|
d690aee6ac826ada1483f8b9964adb6fcc13315a | 0f5084767f7d2a01d9fe63b6a5b128d85654bfde | /Rcode-dataGeneration/writeData.R | 99d789364a1006e7f01cc941c2fc17d68525f6c8 | [] | no_license | aroumpel/CausalConsistency | c755c59393076d82fd9fd0d5197b01033032663f | f947ec2a9618d9e8bbec3031f83229f2e4ea4f1a | refs/heads/master | 2019-01-16T05:11:52.798897 | 2016-12-04T14:10:22 | 2016-12-04T14:10:22 | 73,605,512 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,020 | r | writeData.R | #Requires dataset to be loaded (for example marginalsFCI.RData)
## Export the MAG adjacency matrices, PAG adjacency matrices, removed-variable
## lists and (optionally) separating-set indicator matrices for every
## marginalised DAG in `marginalsFCI` (must be loaded beforehand, e.g. from
## marginalsFCI.RData). Output files are appended to, so the target folders
## must start out empty.
nDags <- length(marginalsFCI)
nNodes <- 20
nMargs <- 100
numRemoved <- 2
folder <- "resultsFCImajrule"
## Fixed: the flag used to be the *string* "TRUE" compared with `== TRUE`,
## which only worked through implicit coercion; a plain logical is safer.
doSepSet <- TRUE
nKept <- nNodes - numRemoved  # number of variables left in each marginal

dir.create(paste0(folder, "/magsMarginals", numRemoved))
dir.create(paste0(folder, "/pagsMarginals", numRemoved))
dir.create(paste0(folder, "/sepSetsMarginals", numRemoved))

## seq_len/seq_along replace the 1:length(...) pattern, which misbehaves
## when a list is empty (1:0 iterates twice).
for (i in seq_len(nDags)) {
  for (j in seq_along(marginalsFCI[[i]])) {
    curMarg <- marginalsFCI[[i]][[j]]
    pagNew <- curMarg$pag
    magNew <- curMarg$mag
    rm <- curMarg$varsRm  # indices of the variables removed in this marginal
    write.table(magNew, file = paste0(folder, "/magsMarginals", numRemoved, "/mags", i, ".txt"),
                append = TRUE, row.names = FALSE, col.names = FALSE)
    write.table(pagNew@amat, file = paste0(folder, "/pagsMarginals", numRemoved, "/pags", i, ".txt"),
                append = TRUE, row.names = FALSE, col.names = FALSE)
    write.table(t(rm), file = paste0(folder, "/pagsMarginals", numRemoved, "/varsRemoved", i, ".txt"),
                append = TRUE, row.names = FALSE, col.names = FALSE)
    if (doSepSet) {
      ## For every pair of variables write a 0/1 indicator matrix of the
      ## separating set (a row of zeros means the set is empty).
      sepFile <- paste0(folder, "/sepSetsMarginals", numRemoved, "/sepSets", i, ".txt")
      for (iX in seq_len(nKept)) {
        sepMatrix <- matrix(0, nKept, nKept)
        currSet <- pagNew@sepset[[iX]]
        if (is.numeric(currSet)) {  # true when we look at sepSet(i, i)
          write.table(sepMatrix, file = sepFile, append = TRUE,
                      row.names = FALSE, col.names = FALSE)
          next
        }
        for (iY in seq_len(nKept)) {
          sepSet <- currSet[[iY]]
          sepMatrix[iY, c(sepSet)] <- 1
        }
        write.table(sepMatrix, file = sepFile, append = TRUE,
                    row.names = FALSE, col.names = FALSE)
      }
    }
  }
}
ada597cf8a63a9c2bb42b65d2d323bd08dd4b7d8 | facd83d2c6378682421bb7902191f1f49fa1836f | /man/ionml.read.laicpqms.Rd | 21b93df95136bd349b9aeb327379b2f7180eb5c7 | [] | no_license | misasa/chelyabinsk | cbb9e3acdaaefb3254d01d38c42e404164dfa2d1 | 495e8bab926934467a3a7fd7c74bb1be69d2f094 | refs/heads/master | 2021-06-28T09:11:19.766488 | 2020-11-20T00:26:38 | 2020-11-20T00:26:38 | 69,550,402 | 0 | 2 | null | 2019-06-21T10:49:18 | 2016-09-29T09:04:01 | R | UTF-8 | R | false | true | 2,403 | rd | ionml.read.laicpqms.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ionml.read.laicpqms.R
\name{ionml.read.laicpqms}
\alias{ionml.read.laicpqms}
\title{Read IONML originated from Analyte G2 with iCAP-Q}
\usage{
ionml.read.laicpqms(pmlame_or_file, t0 = 5, t1 = 20, t2 = 25,
t3 = 60, ref = "Si29", verbose = FALSE, ionml = TRUE)
}
\arguments{
\item{pmlame_or_file}{ion-type pmlame or IONML or QTEGRACSV.}
\item{t0}{When baseline starts (default: 5 s).}
\item{t1}{When baseline ends (default: 20 s).}
\item{t2}{When ion starts (default: 25 s).}
\item{t3}{When ion ends (default: 60 s).}
\item{ref}{reference ion such as `Si29'.}
\item{verbose}{Output debug info (default: FALSE).}
\item{ionml}{Read IONML file instead of IONCSV (default: TRUE).}
}
\value{
The ion-type pmlame of ion-to-ref ratio online with rows of
statistical information.
}
\description{
Read IONML originated from Analyte G2 with iCAP-Q.
When IONML is not found, this will create IONML from QTEGRACSV
(CSV file exported from Qtegra with identical basename with
IONML) via an IONCSV stored in a temporary directory.
The IONCSV consists of columns of time and ion intensities. The
first column of each line should be number of `cycle'. Colname
of the IONCSV should be `time' and name of element followed by
atomic weight (`Si29' instead of `29Si').
This is a fork from `Batch_calc_separated_pdf_2.3.R'. On
2018-07-27, `summarise_each' was replaced by `summarise_all'.
}
\details{
Signal between `t2' and `t3' is regarded as main signal
from a sample (online). Signal between `t0' and `t1' is regarded
as background. Mean of latter is calculated as BASELINE. Then
the BASELINE is subtracted from the main signal. The main signal
is normalized by `ref'. This function returns the BASELINE
subtracted and reference normalized `ionic-ratio' with
statistical information at the bottom of the table. Detection
limit is defined by 3 times standard error of BASELINE measument.
}
\examples{
ionmlfile <- cbk.path("ref_cpx_klb1@1.xml")
message(sprintf("The ionmlfile is located at |\%s|.",ionmlfile))
pmlfile0 <- ionml.read.laicpqms(ionmlfile)
file <- cbk.path("ref_cpx_klb1@1.ion")
message(sprintf("The file is located at |\%s|.",file))
pmlfile0 <- ionml.read.laicpqms(file,ionml=FALSE)
}
\seealso{
\code{\link{ionml.convert.qtegracsv2ioncsv}}
\code{\link{ionml.convert.ioncsv}}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.