blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
176125b3a58bcbf156c7eae4e34fe7ba2278dc9f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/semantic.dashboard/examples/notification_item.Rd.R
|
56a2bf8c958566bc3473cf04793b7f621ccdb05a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 223
|
r
|
notification_item.Rd.R
|
# Example extracted from the semantic.dashboard help page for
# notification_item / notificationItem (see ?notificationItem).
library(semantic.dashboard)
### Name: notification_item
### Title: Create a notification item.
### Aliases: notification_item notificationItem
### ** Examples
# Build a red notification item with the given message text.
notificationItem("This is notification!", color = "red")
|
022c60a347e20b3b7e734ae698b99aa3c5ce910f
|
3e643d92b967dca3f43517920681b5ea7285c9a1
|
/R/searcherAddin.R
|
afebac259928f663e874a1d8c19d46556c50a82f
|
[] |
no_license
|
will-r-chase/searcher2
|
e74891d09cc5649a1488a8758d11d6fda92749ce
|
82ee823097d6e34f63d1cab1b3d67027d353bbcb
|
refs/heads/master
| 2020-08-29T18:54:31.173255
| 2019-10-28T20:22:38
| 2019-10-28T20:22:38
| 218,138,064
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 304
|
r
|
searcherAddin.R
|
#' Search google addin
#'
#' Search google for the selected word w/ the `searcher` package.
#' Intended to be registered as an RStudio addin: it reads the current
#' editor selection and opens a Google search for it in the browser.
#'
#' @return The value returned by \code{searcher::search_google()} —
#'   TODO confirm against the searcher package documentation.
#' @import searcher rstudioapi
#' @export
#'
#' @examples
#' \dontrun{
#' # Run from RStudio with some text selected in the editor:
#' searcherAddin()
#' }
searcherAddin <- function() {
  # The active document context carries a list of selections; the first
  # one is the primary editor selection.
  doc <- rstudioapi::getActiveDocumentContext()
  selected_text <- doc$selection[[1]]$text
  searcher::search_google(query = selected_text)
}
|
0b1e128a62b1a94e24eb28e9a20d09ba2cec4fc3
|
0913ef989631d5fbb6461667563bb13c102d22c8
|
/man/import.info.Rd
|
f24e84935cd4b56b393768b9ae9dc601b892dddb
|
[] |
no_license
|
oucru-biostats/C306
|
57c8b501a106384f101d20c54bdcbc54d99c8bdf
|
7d30d14b081ba64b32fc47ac985bc45ad2672f70
|
refs/heads/master
| 2022-05-01T09:33:44.355345
| 2022-04-28T11:42:23
| 2022-04-28T11:42:23
| 203,103,489
| 0
| 2
| null | 2019-11-07T05:46:23
| 2019-08-19T05:21:54
|
R
|
UTF-8
|
R
| false
| true
| 694
|
rd
|
import.info.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OUCRU_data_dictionary.R
\name{import.info}
\alias{import.info}
\title{Import OUCRU's data dictionary into R}
\usage{
import.info(table_name, input, output)
}
\arguments{
\item{table_name}{a character vector specifying the names of Excel sheets (in OUCRU's data dictionary) to be imported.}
\item{input}{a character value specifying the path to the OUCRU's data dictionary file (in Excel format).}
\item{output}{a character value specifying where to save the imported data.}
}
\value{
Nothing (imported data will be saved in the pre-defined output directory)
}
\description{
A function to import OUCRU's data dictionary into R.
}
|
8fe7be0f41cdb28822fbb107762b24d5b243ecb5
|
167e2d9523f4fe3b0752654a5f26259f36bcc214
|
/script/IOT1.R
|
eddab58b49d2587f04ea054cccaf20775c7f50e2
|
[] |
no_license
|
UbiqumCodeAcademy/IOT1
|
5b391b0734519c4d8315fb661a4e64b996b1b477
|
8f6f1fdc76588c2cb506172b9eab2797a3c12730
|
refs/heads/master
| 2020-05-25T13:14:33.402906
| 2019-05-21T10:48:26
| 2019-05-21T10:48:26
| 187,817,113
| 0
| 0
| null | 2019-05-21T10:39:16
| 2019-05-21T10:39:16
| null |
UTF-8
|
R
| false
| false
| 18,395
|
r
|
IOT1.R
|
# General comments
# take care with variable names
#
# Settings -----
# Load all required packages at once via pacman (installs missing ones).
# NOTE(review): zoo (na.locf, as.yearmon) is used later but not listed here —
# presumably pulled in as a dependency; confirm it attaches.
pacman::p_load(chron, dplyr, plyr, RMySQL, lubridate, ggplot2, reshape2,
quantmod, scales, RColorBrewer, sqldf, ggfortify, tidyr,
compareDF, reshape, rstudioapi, stringi, plotly, padr,
DescTools, anytime, ggfortify, forecast, tslm)
# Set the working directory to the parent of this script's folder.
current_path <- getActiveDocumentContext()$path
setwd(dirname(dirname(current_path)))
rm(current_path)
## Create a database connection
# NOTE(review): credentials are hard-coded in plain text — consider moving
# them to environment variables or a config file outside version control.
con = dbConnect(MySQL(), user='deepAnalytics', password='Sqltask1234!',
dbname='dataanalytics2018',
host='data-analytics-2018.cbrosir2cswx.us-east-1.rds.amazonaws.com')
# Data set INFORMATION -----
## sub_metering_1: energy sub-metering No. 1 (in watt-hour of active energy). It corresponds to the kitchen, containing mainly a dishwasher, an oven and a microwave (hot plates are not electric but gas powered).
## sub_metering_2: energy sub-metering No. 2 (in watt-hour of active energy). It corresponds to the laundry room, containing a washing-machine, a tumble-drier, a refrigerator and a light.
## sub_metering_3: energy sub-metering No. 3 (in watt-hour of active energy). It corresponds to an electric water-heater and an air-conditioner.
# Load data-----
# Pull each yearly table from the database and stack them into one frame.
j <- c("yr_2006", "yr_2007", "yr_2008", "yr_2009", "yr_2010") # Should it be only from 2007 to 2009?
df <- c()
for (i in 1:length(j)) {
X <- dbGetQuery(con,
paste("SELECT * FROM ",
j[i]))
df <- rbind(df,X)
}
rm(X, i, j)
# Drop the database row id and give the submeter columns descriptive names.
df$id <- NULL
df <- df %>% dplyr::rename(kitchen = Sub_metering_1, laundry = Sub_metering_2,
conditioning = Sub_metering_3)
head(df)
tail(df)
## Combine Date and Time attribute values in a new attribute column ----
df <- cbind(df,paste(df$Date,df$Time), stringsAsFactors = FALSE)
colnames(df)[10] <- "DateTime"
# Move the new DateTime column to the front.
df <- df[,c(ncol(df), 1:(ncol(df)-1))]
df$DateTime <- as.POSIXct(df$DateTime, "%Y/%m/%d %H:%M:%S")
# NOTE(review): "Europe" is not a valid Olson timezone name; this was likely
# meant to be e.g. "Europe/Paris" — confirm intended timezone.
attr(df$DateTime, "tzone") <- "Europe"
df <- df %>% mutate(Total_Power = Global_active_power + Global_reactive_power)
summary(df$Total_Power) # Because the highest "Total Power" is 11.3 kW/h, the power hired must be up to 12 kVA
# Only 0.65 % of the time the total power needed is higher than 5.5 kVA. By hiring 6kVA, customer will reduce the power bill up to 50€ a year.
summary(df$Global_intensity)
summary(df$Voltage) # Voltage is a bit higher than expected, as the standard voltage in France is 230V
df$year <- year(df$DateTime) # To filter in or out the years
# check if there are any time gap----
# Minute-to-minute differences; gaps > 1 indicate missing records.
df$gap <- c(NA, with(df, DateTime[-1] - DateTime[-nrow(df)]))
which(df$gap > 1)
# Inspect the timestamps surrounding each gap longer than 3 minutes.
x1 <- df[(c((which(df$gap > 3)-1),(which(df$gap > 3)))),1]
x1 <- as.data.frame(x1)
rm(x1)
df$gap <- NULL
# PAD function (package PADR) to "fill the gaps" with NAs----
# Pad each year separately so pad() doesn't bridge year boundaries.
df1 <- rbind(df %>% filter(year == 2007) %>%
pad(), df %>% filter(year == 2008) %>%
pad(), df %>% filter(year == 2009) %>%
pad(), df %>% filter(year == 2010) %>%
pad())
df1$year <- NULL
# Fill NAs with data ----
# For the ones that are less than three minutes:
# Last-observation-carried-forward, limited to 3-minute runs.
for (i in 4:ncol(df1)){
df1[ ,i] <- na.locf(df1[ ,i], maxgap = 3)
} #We consider that the 3 min gap is the time the meters and submeters need for software updates.
# For all the others
# Longer gaps are filled with the column mode (DescTools::Mode).
for (i in 4:ncol(df1)) {
df1[which(is.na(df1[ ,i]) == TRUE), i] <- Mode(df1[ ,i])
}
# Create attributes from "DateTime" ----
# Derive calendar attributes used for grouping and plotting below.
df1$Date <- date(df1$DateTime)
df1$year <- year(df1$DateTime)
df1$month <- month(df1$DateTime)
df1$day <- day(df1$DateTime)
df1$weekday <- weekdays.POSIXt(df1$DateTime)
df1$week <- week(df1$DateTime)
df1$hour <- hour(df1$DateTime)
df1$minute <- minute(df1$DateTime)
df1$quarter <- quarter(df1$DateTime)
df1$Time <- strftime(df1$DateTime,format="%H:%M:%S")
# Time2 = seconds since midnight, used for the tariff window comparison.
df1$Time2 <- as.numeric(hms(df1$Time))
df1$yearmonth <- as.yearmon(df1$DateTime)
# z <- zoo(1:nrow(df1), as.POSIXct(c(df1$DateTime)))
# g <- seq(start(z), end(z), by = "min")
# na.locf(z, xout = g)
## Power Fares----
# Off-peak time is between 02:00 and 07:00; and between 14:00 and 17:00
# Off-peak price per kWh is 0,1230 €
# Peak time is between 17:00 and 02:00; and between 07:00 and 14:00
# Peak Price per kWh is 0,1580 €
normal_fare <- read.csv("dataset/NormalFares.csv")
peak_fare <- read.csv("dataset/PeakFares.csv")
## Creating a new variable for "Peak" consumes ----
# x holds the four tariff boundaries (seconds since midnight).
x <- as.numeric(hms(c("02:00:00", "07:00:00", "14:00:00", "17:00:00")))
# "valey" (sic) = off-peak windows 02-07h and 14-17h; everything else is peak.
df1$tariff <- ifelse(df1$Time2 > x[1] & df1$Time2 < x[2] | df1$Time2 > x[3] &
df1$Time2 < x[4], "valey", "peak")
rm(x)
# Splitting the data by day ----
# Global_active_power is per-minute kW, so /60 converts to kWh per minute.
df2 <- df1 %>% filter(year > 2006) %>% group_by(Date, Time) %>%
summarise(x = sum(Global_active_power/60))
df2 <- df2 %>% group_by(Date) %>% summarise(Energy = sum(x))
df2$year <- year(df2$Date)
df2$month <- month(df2$Date)
df2$monthf <- factor(df2$month, levels = as.character(1:12),
labels = c("Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"),
ordered = TRUE)
df2$week <- week(df2$Date)
df2$yearmonth <- as.yearmon(df2$Date)
df2$yearmonthf <- as.character(df2$yearmonth)
df2$weekday <- as.POSIXlt(df2$Date)$wday
df2$weekdayf <- factor(df2$weekday, levels = rev(0:6),
labels = rev(c("Sun", "Mon", "Tue", "Wed", "Thu", "Fri",
"Sat")), ordered = TRUE)
df2$Date <- date(df2$Date)
# monthweek = week-of-month index, for the calendar-heatmap facets below.
df2 <- ddply(df2,.(yearmonthf),transform,monthweek=1+week-min(week))
# Calendar heatmap: daily energy by weekday x week-of-month, faceted by month.
ggplot(df2, aes(monthweek, weekdayf, fill = Energy)) +
geom_tile(colour = "white") + facet_grid(year~monthf) +
scale_fill_gradient(low="gold", high="red") +
ggtitle("Total Power Consume") + xlab("Week of Month") + ylab("") +
theme_bw()
#Converting it to a Time Series
df2ts <- ts(df2$Energy, frequency = 365, start = c(2007,1))
autoplot(df2ts)
## Applying time series linear regression to the Time series:
fit_df2 <- tslm(df2ts ~ trend + season)
summary(fit_df2)
plot(forecast(fit_df2, h=5))
checkresiduals(fit_df2)
CV(fit_df2) # Horrible!
## Decomposing the Time series:
decomposed_df2ts <- decompose(df2ts)
plot(decomposed_df2ts)
summary(decomposed_df2ts)
decomposed_df2ts$random
# STL decomposition: columns are seasonal, trend, remainder.
x <- stl(df2ts, "periodic")
seasonal_stl_df2ts <- x$time.series[,1]
trend_stl_df2ts <- x$time.series[,2]
random_stl_df2ts <- x$time.series[,3]
y <- (sum(abs(x$time.series[,3])))/nrow(df2) # Absolute Mean Error
# Now by month ----
# Aggregate daily energy up to monthly totals.
df3 <- df2 %>% group_by(yearmonth) %>% summarise(Energy = sum(Energy))
df3$year <- year(df3$yearmonth)
df3$month <- month(df3$yearmonth)
df3$monthf <- factor(df3$month, levels = as.character(1:12),
labels = c("Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"),
ordered = TRUE)
# Monthly energy over time.
ggplot(df3, aes(x = yearmonth, y = Energy)) +
geom_line(color = "#00AFBB", size = 1) + ylab("Energy (kW/h)") +
xlab("Year") + ggtitle("Energy consumed per month")
df3$Year <- as.factor(df3$year)
# Same data as year-over-year seasonal curves.
ggplot(df3, aes(x = monthf, y = Energy, colour = Year, group = Year)) +
geom_line() + ylab("Energy (kW/h)") + xlab("Month") +
ggtitle("Energy consume per month") + geom_point()
#Converting it to a Time Series
df3ts <- ts(df3$Energy, frequency = 12, start = c(2007,1))
plot.ts(df3ts)
autoplot(df3ts)
## Applying time series linear regression to the Time series:
fit_df3 <- tslm(df3ts ~ trend + season)
summary(fit_df3)
plot(forecast(fit_df3, h=5, level=c(80,90)))
checkresiduals(fit_df3)
CV(fit_df3)
## Decomposing the Time series:
decomposed_df3ts <- decompose(df3ts)
plot(decomposed_df3ts)
summary(decomposed_df3ts)
decomposed_df3ts$random
# STL decomposition: columns are seasonal, trend, remainder.
x <- stl(df3ts, "periodic")
seasonal_stl_df3ts <- x$time.series[,1]
trend_stl_df3ts <- x$time.series[,2]
random_stl_df3ts <- x$time.series[,3]
y <- (sum(abs(x$time.series[,3])))/nrow(df3) # Absolute Mean Error
## Applying Holt-Winters to the Time series:
# Seasonally adjust first, then fit simple exponential smoothing
# (beta = gamma = FALSE disables trend and seasonal components).
adjusted_df3ts <- df3ts - decomposed_df3ts$seasonal
autoplot(adjusted_df3ts)
plot(decompose(adjusted_df3ts))
df3ts_HW <- HoltWinters(adjusted_df3ts, beta=FALSE, gamma=FALSE)
plot(df3ts_HW, ylim = c(575, 950))
## Forecast Holt-Winters:
df3ts_HW_forecast<- forecast(df3ts_HW, h=5)
plot(df3ts_HW_forecast)
# Now by month and tariff ----
# Hourly energy split by tariff window, then costed with the peak/off-peak
# price table (row 3 of peak_fare) and aggregated to a monthly bill.
df4 <- df1 %>% group_by(Date, hour, tariff) %>% summarise(sum_total_power =
sum(Global_active_power/60))
df4$yearmonth <- as.yearmon(df4$Date)
df4$PeakPower <- ifelse(df4$tariff == "peak", (df4$sum_total_power), 0)
df4$OffPeakPower <- ifelse(df4$tariff == "valey", (df4$sum_total_power), 0)
df4$PeakCost <- df4$PeakPower * peak_fare$Peak_Price_per_kWh[3]
df4$OffPeakCost <- df4$OffPeakPower * peak_fare$Off_Peak_Price_per_kWh[3]
df4$variable_tariff_cost <- df4$PeakCost + df4$OffPeakCost
df4 <- df4 %>% group_by(yearmonth) %>%
summarise(Monthly_fare = sum(variable_tariff_cost) +
peak_fare$Subscription_price[3]/12)
head(df4)
df4$year <- year(df4$yearmonth)
df4$month <- month(df4$yearmonth)
ggplot(df4, aes(x = yearmonth, y = Monthly_fare)) +
geom_line(color = "#01EC13", size = 1) + ylab("Euros") +
xlab("") + ggtitle("Peak/Valey tariff monthly fares")
# Same monthly bill under the flat ("normal") tariff, row 4 of normal_fare.
df5 <- df1 %>% group_by(yearmonth) %>% summarise(sum_total_power =
sum(Global_active_power/60))
head(df5)
df5$Monthly_fare <- df5$sum_total_power * normal_fare$Price._per_kWh[4] +
(normal_fare$Subscrition_price[4]/12)
df5$year <- year(df5$yearmonth)
df5$month <- month(df5$yearmonth)
ggplot(df5, aes(x = yearmonth, y = Monthly_fare)) +
geom_line(color = "#FF5733", size = 1) + ylab("Euros") +
xlab("") + ggtitle("Normal tariff monthly fares")
df5$sum_total_power <- NULL
df4$Tariff <- "Peak/Valey Tariff"
df5$Tariff <- "Normal Tariff"
# Month-by-month diff of the two tariffs (compareDF package).
comparison <- compare_df(df4, df5, "yearmonth")
comparison$comparison_df
comparison$html_output
df6 <- as.data.frame(rbind(df4,df5))
df6$year <- NULL
df6$month <- NULL
ggplot(data = df6, aes(x = yearmonth, y = Monthly_fare)) +
geom_col(color = "blue", fill = "lightblue") + ylab("Monthly bill (€)") +
facet_wrap(Tariff~., scales = "free") + xlab("Year")
ggplot(df6, aes(x = yearmonth, y = Monthly_fare, colour = Tariff)) +
geom_line() + ylab("Monthly bill (€)") + xlab("Year")
#Converting it to a Time Series
df4ts <- ts(df4$Monthly_fare, frequency = 12, start = c(2007,1))
df5ts <- ts(df5$Monthly_fare, frequency = 12, start = c(2007,1))
plot.ts(df3ts)
autoplot(df4ts)
# df7: peak/valey bill expressed as a percentage of the normal bill.
df7 <- as.data.frame(cbind(df4,df5))
df7[ ,c(3, 4, 6, 8, 9, 10)] <- NULL
colnames(df7) <- c("yearmonth", "Monthly_fare", "Tariff", "Normal_Monthly_fare")
df7$ratio <- round((df7$Monthly_fare/df7$Normal_Monthly_fare)*100,2)
# df8: the normal tariff's own ratio, used as the 100% baseline line.
df8 <- as.data.frame(cbind(df4,df5))
df8[ ,c(3, 4, 5, 6, 8, 9)] <- NULL
colnames(df8) <- c("yearmonth", "Monthly_fare", "Normal_Monthly_fare", "Tariff")
# NOTE(review): this divides Normal by Normal, so ratio is always 100 —
# presumably intentional as a baseline; confirm it wasn't meant to be
# Monthly_fare/Normal_Monthly_fare.
df8$ratio <- round((df8$Normal_Monthly_fare/df8$Normal_Monthly_fare)*100,2)
df8 <- df8[ ,c(1, 2, 4, 3, 5)]
df9 <- as.data.frame(rbind(df7,df8))
rm(df4, df5, df7, df8)
df9$Monthly_fare <- NULL
df9$Normal_Monthly_fare <- NULL
ggplot(df9, aes(x = yearmonth, y = ratio, colour = Tariff)) +
geom_line() + ylab("Normal fare ratio") + xlab("Year")
# Plotting some weeks ----
# Daily energy for March 2007, one line per week of the month.
df7 <- df2 %>% filter(year == 2007, month == 3) %>%
group_by(weekday, week, Date) %>%
summarise(Energy = sum(Energy))
df7$week <- stringi::stri_datetime_fields(df7$Date)$WeekOfMonth
# Re-map weekday codes so the week starts on Monday.
df7$weekdayx <- factor(df7$weekday, labels = c(7, 1, 2, 3, 4, 5,
6), ordered = TRUE)
df7$weekdayf <- factor(df7$weekdayx, levels = (1:7),
labels = c("Mon", "Tue", "Wed", "Thu", "Fri",
"Sat", "Sun"), ordered = TRUE)
ggplot(df7, aes(x = weekdayf, y = Energy, colour = week, group = week)) +
geom_line() + geom_point() + ylab("Energy (kW/h)") + xlab("Week day") +
ggtitle("Energy consume per day")
# Plotting by submeters----
# Submeter readings are Wh per minute, so /1000 converts to kWh.
df8 <- df %>% filter(year == 2007) %>% group_by(Date, Time) %>%
summarise(kitchen_energy = sum(kitchen/1000), laundry_energy = sum(laundry/1000),
conditioning_energy = sum(conditioning/1000))
df8$yearmonth <- as.yearmon(df8$Date)
df8 <- df8 %>% group_by(yearmonth) %>%
summarise(kitchen_energy = sum(kitchen_energy),
laundry_energy = sum(laundry_energy),
conditioning_energy = sum(conditioning_energy))
df8$year <- year(df8$yearmonth)
df8$month <- as.integer(month(df8$yearmonth))
df8$monthf <- factor(df8$month, levels = as.character(1:12),
labels = c("Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"),
ordered = TRUE)
df8$yearmonthf <- as.character(df8$yearmonth)
ggplot(df8, aes(x = monthf, group = 1)) +
geom_line(aes(y = kitchen_energy), color = "#1A237E") +
geom_line(aes(y = conditioning_energy), color = "#C62828") +
geom_line(aes(y = laundry_energy), color = "#2E7D32") +
ylab("Energy (kW/h)") + xlab("Month") + ggtitle("Energy consumed per submeter")
# Plotting for a representative day ----
# Build a "typical" 24-hour profile: hourly totals per day, then the
# modal value across days for each hour.
df9 <- df1 %>% group_by(DateTime, weekday) %>%
summarise(kitchen_energy = sum(kitchen/1000), laundry_energy = sum(laundry/1000),
conditioning_energy = sum(conditioning/1000),
Global_Energy = sum(Global_active_power/60))
df9$hour <- hour(df9$DateTime)
df9$day <- day(df9$DateTime)
df9 <- df9 %>% group_by(day, hour) %>%
summarise(kitchen_energy = round(sum(kitchen_energy),0),
laundry_energy = round(sum(laundry_energy),0),
conditioning_energy = round(sum(conditioning_energy),0),
Global_Energy = round(sum(Global_Energy),0))
# NOTE(review): getmode() is not defined in this file nor an obvious export
# of the loaded packages — confirm where it comes from.
df9 <- df9 %>% group_by(hour) %>%
summarise(kitchen_energy = getmode(kitchen_energy),
laundry_energy = getmode(laundry_energy),
conditioning_energy = getmode(conditioning_energy),
Global_Energy = getmode(Global_Energy))
# Energy not captured by any submeter = global minus the three submeters.
df9$Energy_no_submetered <- (df9$Global_Energy - df9$kitchen_energy -
df9$laundry_energy - df9$conditioning_energy)
df9$Hour <- factor(df9$hour, levels = as.character(0:23),
labels = c("0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
"10", "11", "12", "13", "14", "15", "16", "17",
"18", "19", "20", "21", "22", "23"),
ordered = TRUE)
# Interactive plotly chart with one trace per submeter plus global.
plot_ly(df9, x = df9$Hour, y = df9$kitchen_energy, name = "Kitchen",
type = "scatter", mode = "lines") %>%
add_trace(y = df9$conditioning_energy, name = "Conditioning",
mode = "lines") %>%
add_trace(y = df9$laundry_energy, name = "Laundry", mode = "lines") %>%
add_trace(y = df9$Energy_no_submetered, name = "Not Submetered",
mode = "lines") %>%
add_trace(y = df9$Global_Energy, name = "Global", mode = "lines") %>%
layout(title = "Representative day of Energy consumed per submeter",
xaxis = list(title = "Time"), yaxis = list(title = "Energy (kW/h)"))
# Data splitting ----
# Train on 2007-2009 monthly data, test on 2010; compare three baseline
# forecasting methods (mean, random walk, seasonal naive).
trainSet <- window(df3ts, 2007, c(2009,12))
df3fit1 <- meanf(trainSet,h=12)
df3fit2 <- rwf(trainSet,h=12)
df3fit3 <- snaive(trainSet,h=12)
testSet <- window(df3ts, 2010)
accuracy(df3fit1, testSet)
accuracy(df3fit2, testSet)
accuracy(df3fit3, testSet)
gglagplot(df3ts) # The relationship is strongly positive at lag 12, reflecting the strong seasonality in the data.
ggAcf(df3ts, lag=24)
ggsubseriesplot(df3ts)
ggseasonplot(df3ts, year.labels=TRUE, year.labels.left=TRUE) +
ylab("Power consumed (kW/h)") +
ggtitle("Seasonal plot: Monthly power consume")
# Overlay the three baseline forecasts on the monthly series.
autoplot(window(df3ts, start=2007)) +
autolayer(df3fit1, series="Mean", PI=FALSE) +
autolayer(df3fit2, series="Naïve", PI=FALSE) +
autolayer(df3fit3, series="Seasonal naïve", PI=FALSE) +
xlab("Year") + ylab("Energy (kW/h)") +
ggtitle("Forecasts for monthly power consume") +
guides(colour=guide_legend(title="Forecast"))
# Same exercise on the daily series, splitting at day 300 of 2010.
trainSet <- window(df2ts, 2007, c(2010,300))
df2fit1 <- meanf(trainSet,h=65)
df2fit2 <- rwf(trainSet,h=65)
df2fit3 <- snaive(trainSet,h=65)
testSet <- window(df2ts, c(2010,300))
accuracy(df2fit1, testSet)
accuracy(df2fit2, testSet)
accuracy(df2fit3, testSet)
autoplot(window(df2ts, start = c(2009,300))) +
autolayer(df2fit1, series="Mean", PI=FALSE) +
autolayer(df2fit2, series="Naïve", PI=FALSE) +
autolayer(df2fit3, series="Seasonal naïve", PI=FALSE) +
xlab("Year") + ylab("Energy (kW/h)") +
ggtitle("Forecasts for monthly power consume") +
guides(colour=guide_legend(title="Forecast"))
# IDEA ----
# Hourly mean active power per (year, month, hour) as a ts with
# frequency 24*12 (hours per "month-cycle" year).
df4 <- df1 %>% filter(year > 2006) %>% group_by(Date, Time, hour, month, year) %>%
summarise(x = sum(Global_active_power/60))
df4 <- df4 %>% group_by(Date, hour, month, year) %>% summarise(x = sum(x))
df4 <- df4 %>% group_by(year, month, hour) %>% summarise(GAP_mean = mean(x))
df4ts <- ts(df4$GAP_mean, frequency = 24*12, start = c(2007,1))
autoplot(df4ts)
df4ts_forecast<- forecast(df4ts, h=24)
plot(df4ts_forecast)
trainSet <- window(df4ts, 2007, c(2010,24*9))
df4fit1 <- meanf(trainSet,h=240)
df4fit2 <- rwf(trainSet,h=240)
df4fit3 <- snaive(trainSet,h=240)
testSet <- window(df4ts, c(2010,24*9))
accuracy(df4fit1, testSet)
accuracy(df4fit2, testSet)
accuracy(df4fit3, testSet)
# IDEA 2----
# Same idea, but grouped by quarter instead of month (frequency 24*4).
df5 <- df1 %>% filter(year > 2006) %>% group_by(Date, Time, hour, quarter, year) %>%
summarise(x = sum(Global_active_power/60))
df5 <- df5 %>% group_by(Date, hour, quarter, year) %>% summarise(x = sum(x))
df5 <- df5 %>% group_by(year, quarter, hour) %>% summarise(GAP_mean = mean(x))
df5ts <- ts(df5$GAP_mean, frequency = 24*4, start = c(2007,1))
autoplot(df5ts)
df5ts_forecast<- forecast(df5ts, h=24)
plot(df5ts_forecast)
trainSet <- window(df5ts, 2007, c(2009,48))
df5fit1 <- meanf(trainSet,h=48)
df5fit2 <- rwf(trainSet,h=48)
df5fit3 <- snaive(trainSet,h=48)
testSet <- window(df5ts, c(2009,48))
accuracy(df5fit1, testSet)
accuracy(df5fit2, testSet)
accuracy(df5fit3, testSet)
# Time-series cross-validation (one-step-ahead) RMSE for drift random walk
# and seasonal naive, compared with their in-sample residual RMSE.
e <- tsCV(df3ts, rwf, drift=TRUE, h=1)
e2 <- tsCV(df3ts, snaive, drift=TRUE, h=1)
sqrt(mean(e^2, na.rm=TRUE))
sqrt(mean(e2^2, na.rm=TRUE))
sqrt(mean(residuals(rwf(df3ts, drift=TRUE))^2, na.rm=TRUE))
sqrt(mean(residuals(snaive(df3ts, drift=TRUE))^2, na.rm=TRUE))
|
da79cc3ea403121e0516c364f8169a94ee49b998
|
1a9ef448017a28bfffdfb78887022b46a6169507
|
/man/text2times.Rd
|
bd282dc8638e31fbd57eea388fdc6992ac9c6c66
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
rtelmore/RDSTK
|
4ae28abbb12c937141c5a834dc46a010799d0f15
|
cdddc4d3647281155067bb434f54f12c27fdd3aa
|
refs/heads/master
| 2021-01-21T11:45:00.044495
| 2017-11-20T20:47:30
| 2017-11-20T20:47:30
| 1,686,614
| 19
| 12
| null | 2017-03-16T18:52:10
| 2011-05-01T02:08:32
|
R
|
UTF-8
|
R
| false
| true
| 1,426
|
rd
|
text2times.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/text-to-times.R
\name{text2times}
\alias{text2times}
\title{Parses a text string for time information.}
\usage{
text2times(text, session = RCurl::getCurlHandle())
}
\arguments{
\item{text}{A text string containing possible time information.}
\item{session}{The CURLHandle object giving the structure for the options
and that will process the command. For curlMultiPerform, this is an object
of class \code{MultiCURLHandle-class}.}
}
\value{
A data.frame containing
\item{duration}{Length of time in seconds of the recognized event.}
\item{start_index}{The beginning of the matched string in the original string.}
\item{is_relative}{Logical value for matched string.}
\item{end_index}{The end of the matched string in the original string.}
\item{time_seconds}{The unix timestamp of the event (time since epoch).}
\item{matched_string}{The string that was used in the processing of the request.}
\item{time_string}{The time string of the recognized time event.}
}
\description{
This function takes a text string and returns any time-specific information
that it finds.
}
\examples{
\dontrun{
text <- "02/01/2010, Meeting this Wednesday"
text2times(text)
}
}
\references{
http://www.datasciencetoolkit.org/developerdocs#text2times
}
\seealso{
\code{\link{curlPerform}},
\code{\link{getCurlHandle}},
\code{\link{dynCurlReader}}
}
|
73f03c5e6b15f57a70a9e13f3c381c5fd16f7624
|
22d9614d91d5cac0a14512eabbf477aa30e2acd7
|
/_episodes_rmd/tests/test_fail.R
|
dd6b3c84e6c9c908763abc213b1ef62a7b899d6c
|
[
"MIT",
"CC-BY-4.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
mawds/r-programming-intro
|
78890568a2ac29e04e10d83ed1f94f6a79cb816c
|
afe759c8974644ece3af1ef943e9895e9bc18c09
|
refs/heads/gh-pages
| 2021-09-14T09:40:21.439999
| 2018-05-11T13:15:29
| 2018-05-11T13:15:29
| 110,844,421
| 0
| 1
| null | 2018-02-01T13:01:08
| 2017-11-15T14:32:46
|
Python
|
UTF-8
|
R
| false
| false
| 155
|
r
|
test_fail.R
|
# Tests for the field-cleaning helper `cleanfield`.
# NOTE(review): the expected value c(2, 3, NA) does not obviously follow from
# the input c(1, 2, -999.99); given the file name (test_fail.R) this may be a
# deliberately failing example — confirm before "fixing" the expectation.
context("Cleaning fields")

test_that("Can clean a field", {
  raw_values <- c(1, 2, -999.99)
  expected <- c(2, 3, NA)
  expect_equal(expected, cleanfield(raw_values))
})
|
f933d22f5a3766a176cc7c3cc3a8885de82f6f14
|
c0bc3866a2b44320138e6dd8f26f775620f43851
|
/Q1.R
|
aa73ebb3edcfffe4f750ccee0a4168384dd41ffc
|
[] |
no_license
|
navaneethreddymatta/Defect_Data_Analysis_for_Defect_Projection
|
580be44d1186533b9c73e3457a76c6e00ff421a8
|
38f0da3756c45f0a79795068548118a64e045f3d
|
refs/heads/master
| 2021-01-21T18:10:40.443154
| 2016-09-16T06:12:28
| 2016-09-16T06:12:28
| 68,357,112
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,817
|
r
|
Q1.R
|
# Locate the directory this script lives in (works when the file is source()'d).
currdir <- getSrcDirectory(function(x) {x})
# Defect data sets live under a "Datasets" folder next to the script;
# subdir holds the project folder names found there.
maindir <- paste(currdir, "/Datasets", sep="")
subdir <- list.files(maindir)
#' Jensen-Shannon divergence (base-2 logs) between two non-negative vectors.
#'
#' Both vectors are zero-padded to the longer length, zero entries are then
#' replaced with a 0.1 pseudo-count (avoids log(0)), and each vector is
#' normalised to a probability distribution before computing the divergence.
#'
#' @param x Numeric vector of non-negative counts/weights.
#' @param y Numeric vector of non-negative counts/weights.
#' @return A single number in [0, 1]; 0 when the distributions are identical.
jsd <- function(x, y) {
  maxlen <- max(length(x), length(y))
  # Vectorized padding (the original scalar loop also broke on length-0
  # input because 1:length(x) counts down from 1 to 0).
  p <- rep(0, maxlen)
  p[seq_along(x)] <- x
  p[p == 0] <- 0.1          # pseudo-count so log2(p/m) stays finite
  p <- p / sum(p)
  q <- rep(0, maxlen)
  q[seq_along(y)] <- y
  q[q == 0] <- 0.1
  q <- q / sum(q)
  m <- 0.5 * (p + q)        # mixture distribution
  0.5 * (sum(p * log2(p / m)) + sum(q * log2(q / m)))
}
# For each of the three projects, read all version files, compute the
# pairwise JSD between every pair of versions, and draw a boxplot of the
# resulting scores. The three branches are structurally identical apart
# from file prefix (E/J/T), number of versions, and boxplot colour.
for(f in 1:3){
if(subdir[f]=="Eclipse"||subdir[f]=="JetSpeed-2" || subdir[f]=="Tomcat") {
value = 1
list1 <- list()
# NOTE(review): setwd() inside a loop mutates global state; the script
# relies on the Datasets/<project> layout created above.
setwd(file.path(maindir,paste(subdir[f])))
files = dir(getwd(),pattern=".txt",recursive=TRUE)
print(files)
if(subdir[f]=="Eclipse"){
# Eclipse has six versions: E1.txt .. E6.txt.
data.list<-list()
for(k in 1:6){
filename = paste('E',k,'.txt', sep='')
data.list[[k]] = scan(filename)
}
# All unordered pairs (i, j), i < j.
for ( i in 1:5){
for (j in (i+1):6){
list1[value]=jsd(data.list[[i]],data.list[[j]])
value=value+1
}
}
boxplot(unlist(list1),ylab="JSD Score",xlab="Versions",main=subdir[f],col=c("cyan"))
}
if(subdir[f]=="JetSpeed-2"){
# JetSpeed-2 has four versions: J1.txt .. J4.txt.
data.list1<-list()
for(k in 1:4){
filename = paste('J',k,'.txt', sep='')
data.list1[[k]] = scan(filename)
}
for ( i in 1:3){
for (j in (i+1):4)
{
list1[value]=jsd(data.list1[[i]],data.list1[[j]])
value=value+1
}
}
boxplot(unlist(list1),ylab="JSD Score",xlab="Versions",main=subdir[f],col=c("limegreen"))
}
if(subdir[f]=="Tomcat"){
# Tomcat has four versions: T1.txt .. T4.txt.
data.list3<-list()
for(k in 1:4){
filename = paste('T',k,'.txt', sep='')
data.list3[[k]] = scan(filename)
}
for ( i in 1:3){
for (j in (i+1):4){
list1[value]=jsd(data.list3[[i]],data.list3[[j]])
value=value+1
}
}
boxplot(unlist(list1),ylab="JSD Score",xlab="Versions",main=subdir[f],col=c("firebrick"))
}
}
}
|
a20fc69fd8b01e8c4dec4ee68700be943994fa4e
|
caf6f15fe311eb9e8a70465054b2f47d6729cfc0
|
/man/kappa4nlsBoot.Rd
|
cc2caab6b124a262b6eef07fdc84564d07382a37
|
[] |
no_license
|
cran/alR
|
8f4f84656fc2bbed4381444a70f6b442da903d59
|
c9d857d9bfdea576eb3bbe7ca5dcb67ed7a5728b
|
refs/heads/master
| 2021-01-21T12:46:29.775345
| 2017-12-07T11:02:24
| 2017-12-07T11:02:24
| 102,097,008
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,531
|
rd
|
kappa4nlsBoot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kappa4nlsBoot.R
\name{kappa4nlsBoot}
\alias{kappa4nlsBoot}
\alias{kappa4nlsBoot.default}
\alias{print.kappa4nlsBoot}
\alias{summary.kappa4nlsBoot}
\alias{print.summary.kappa4nlsBoot}
\alias{kappa4nlsBoot.formula}
\alias{predict.kappa4nlsBoot}
\title{Sigmoidal curve fitting.}
\usage{
kappa4nlsBoot(formula, data = list(), xin, lower, upper, tol, maxiter,
bootstraps, bootName, ...)
\method{kappa4nlsBoot}{default}(formula, data = list(), xin, lower = c(0,
-5, -5), upper = c(10, 1, 1), tol = 1e-15, maxiter = 50000, bootstraps,
bootName, ...)
\method{print}{kappa4nlsBoot}(x, ...)
\method{summary}{kappa4nlsBoot}(object, ...)
\method{print}{summary.kappa4nlsBoot}(x, ...)
\method{kappa4nlsBoot}{formula}(formula, data = list(), xin, lower, upper,
tol, maxiter, bootstraps, bootName, ...)
\method{predict}{kappa4nlsBoot}(object, newdata = NULL, ...)
}
\arguments{
\item{formula}{An LHS ~ RHS formula, specifying the linear model to be estimated.}
\item{data}{A data.frame which contains the variables in \code{formula}.}
\item{xin}{Numeric vector of length 3 containing initial values, for \eqn{\sigma}, \eqn{h}, and \eqn{k}.}
\item{lower}{A vector of lower constraints for the parameters to be estimated; defaults to c(0, -5, -5).}
\item{upper}{A vector of upper constraints for the parameters to be estimated; defaults to c(10, 1, 1).}
\item{tol}{Error tolerance level; defaults to 1e-15.}
\item{maxiter}{The maximum number of iterations allowed; defaults to 50000.}
\item{bootstraps}{An integer giving the number of bootstrap samples.}
\item{bootName}{The name of the .rds file to store the kappa4nlsBoot object. May include a path.}
\item{...}{Arguments to be passed on to the differential evolution function \code{\link{JDEoptim}}.}
\item{x}{A kappa4nlsBoot object.}
\item{object}{A kappa4nlsBoot object.}
\item{newdata}{The data on which the estimated model is to be fitted.}
}
\value{
A generic S3 object with class kappa4nlsBoot.
kappa4nlsBoot.default: A list object (saved using \code{saveRDS} in the specified location) with the following components:
\itemize{
\item intercept: Did the model contain an intercept TRUE/FALSE?
\item coefficients: A vector of estimated coefficients.
\item bcoefficients: A vector of bootstrap coefficients, resulting from bootstrap estimation.
\item se: The standard errors for the estimates resulting from bootstrap estimation.
\item error: The value of the objective function.
\item errorList: A vector of values of the objective function for each bootstrap sample.
\item fitted.values: A vector of estimated values.
\item residuals: The residuals resulting from the fitted model.
\item call: The call to the function.
\item time: Min, mean and max time incurred by the computation, as obtained from \code{\link{comm.timer}}.
}
summary.kappa4nlsBoot: A list of class summary.kappa4nlsBoot with the following components:
\itemize{
\item call: Original call to the \code{kappa4nlsBoot} function.
\item coefficients: A matrix with estimates, estimated errors, and 95\% parameter confidence intervals (based on the inverse empirical distribution function).
\item r.squared: The \eqn{r^{2}} coefficient.
\item sigma: The residual standard error.
\item error: Value of the objective function.
\item time: Min, mean and max time incurred by the computation, as obtained from \code{\link{comm.timer}}.
\item residSum: Summary statistics for the distribution of the residuals.
\item errorSum: Summary statistics for the distribution of the value of the objective function.
}
print.summary.kappa4nlsBoot: The object passed to the function is returned invisibly.
predict.kappa4nlsBoot: A vector of predicted values resulting from the estimated model.
}
\description{
Bootstrap estimates, along with standard errors and confidence intervals, of a nonlinear model, resulting from nonlinear least squares fitting of the four-parameter kappa sigmoidal function.
}
\section{Methods (by class)}{
\itemize{
\item \code{default}: default method for kappa4nlsBoot.
\item \code{kappa4nlsBoot}: print method for kappa4nlsBoot.
\item \code{kappa4nlsBoot}: summary method for kappa4nlsBoot.
\item \code{summary.kappa4nlsBoot}: print method for summary.kappa4nlsBoot.
\item \code{formula}: formula method for kappa4nlsBoot.
\item \code{kappa4nlsBoot}: predict method for kappa4nlsBoot.
}}
|
d910c76defa8305e8af1113be38ab015f8eb9216
|
99bc8c37eb2a40dd7b2dfa3b7215b908fb508b1b
|
/delivery time.R
|
324765c4a6107c497786983579b3bc667c2a0771
|
[] |
no_license
|
karthi-25/Tutorials-on-R-codes
|
1310d418f32e1c08cb81ef52c953cb64d0161e96
|
ba6014cbe47f5645c88dc09a6c0e2b9c4ffca00b
|
refs/heads/main
| 2023-04-19T06:01:56.577593
| 2021-05-06T11:36:18
| 2021-05-06T11:36:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 267
|
r
|
delivery time.R
|
# Simple linear regression of delivery time on sorting time.
# Assumes a data frame `delivery_time` with columns `Delivery Time` and
# `Sorting Time` is already present in the workspace — TODO confirm where
# it is loaded from.

#load data
hi <- delivery_time

#create regression model
# Fit with the data= argument rather than hi$... inside the formula:
# lm(hi$y ~ hi$x) bakes the vectors into the model, so predict(model, newdata)
# would silently ignore newdata and always return in-sample predictions.
model <- lm(`Delivery Time` ~ `Sorting Time`, data = hi)
summary(model)

#predict for the data set
pred <- predict(model, hi)
pred

#load the pred values into final data
final_data <- data.frame(pred, hi[,-2])
final_data

# Diagnostic plots (residuals vs fitted, Q-Q, scale-location, leverage).
plot(model)
|
e60f1e66c740d12c419c02fa5af7c95f1aeba9e5
|
e37f4f64b615dd7871893cc8fc2d6aac0040b828
|
/man/coxdual.strata.Rd
|
9b6bbe706c6130532bf84537d01e7a7235c4a5a2
|
[] |
no_license
|
aboruvka/coxinterval
|
3b6b0ba6b4ad864daa28551b6569ecad72f5c48c
|
de93fd1f12bb16550c8b0d62a16359f8fa96c3b7
|
refs/heads/master
| 2021-01-21T04:54:38.901728
| 2016-06-03T01:24:53
| 2016-06-03T01:24:53
| 17,570,288
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,184
|
rd
|
coxdual.strata.Rd
|
\name{coxdual.strata}
\alias{coxdual.strata}
\title{Identify transition type in model terms}
\description{
A utility function for \code{\link{coxdual}} that identifies the
state-transition types.
}
\usage{coxdual.strata(from, to)}
\arguments{
\item{from}{
a variable representing the originating state.
}
\item{to}{
a variable representing the subsequent state.
}
}
\value{
A combination of the \code{from} and \code{to} arguments by column
with two attributes:
\item{\code{"states"}}{
a vector giving the unique non-missing values in the \code{from} and
\code{to} arguments ordered so that the initial state appears first,
the intermediate state second, and the terminal state last.
}
\item{\code{"types"}}{
a vector of transition type labels in terms of the values in the
\code{from} and \code{to} arguments ordered so that the
intermediate transition appears first, the terminal transition
directly from the initial state second, and the terminal transition
from the intermediate state last.
}
}
\seealso{
\code{\link{coxdual}}
}
\examples{with(dualrc[1:10, ], coxdual.strata(from, to))}
\keyword{survival}
|
438137d5c3af03edf6a196b27914ab6b3e2dd6f5
|
2b73cb9ae681bc43be9c1d53eee9e6116a1af173
|
/man/addwmfs.Rd
|
580d8b05288f1400b56bf1027713e21792390ab1
|
[] |
no_license
|
cran/wsyn
|
8a161b1c239875cddb0a802e8393b4a1993725e9
|
72e97f83500ebc44fb1a15d968c426ad478d4f9f
|
refs/heads/master
| 2021-08-22T22:28:55.339113
| 2021-06-18T20:10:02
| 2021-06-18T20:10:02
| 167,043,420
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,842
|
rd
|
addwmfs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/addwmfs.R
\name{addwmfs}
\alias{addwmfs}
\title{Adds wavelet mean field information to a \code{clust} object}
\usage{
addwmfs(obj)
}
\arguments{
\item{obj}{An object of class \code{clust}}
}
\value{
\code{addwmfs} returns another \code{clust} object with \code{wmfs} slot now included.
If \code{obj$wmfs} was not NA, the object is returned as is.
}
\description{
When a \code{clust} object is created, the \code{wmfs} slot is NA. This function fills it in.
}
\details{
This function uses the values of \code{scale.min}, \code{scale.max.input},
\code{sigma} and \code{f0} stored in \code{obj$methodspecs}. It is possible to create
a \code{clust} object with bad values for these slots. This function throws an error in that
case. You can use a correlation-based method for calculating the synchrony matrix and
still pass values of \code{scale.min}, \code{scale.max.input}, \code{sigma} and \code{f0}
to \code{clust} (in fact, this happens by default) - they won't be used by \code{clust},
but they will be there for later use by \code{addwmfs} and \code{addwpmfs}.
}
\examples{
sig<-matrix(.8,5,5)
diag(sig)<-1
lents<-50
if (requireNamespace("mvtnorm",quietly=TRUE))
{
dat1<-t(mvtnorm::rmvnorm(lents,mean=rep(0,5),sigma=sig))
dat2<-t(mvtnorm::rmvnorm(lents,mean=rep(0,5),sigma=sig))
}else
{
dat1<-t(matrix(rep(rnorm(lents),times=5),lents,5))
dat2<-t(matrix(rep(rnorm(lents),times=5),lents,5))
}
dat<-rbind(dat1,dat2)
times<-1:lents
dat<-cleandat(dat,times,clev=1)$cdat
coords<-data.frame(Y=rep(0,10),X=1:10)
method<-"coh.sig.fast"
clustobj<-clust(dat,times,coords,method,nsurrogs = 100)
res<-addwmfs(clustobj)
}
\seealso{
\code{\link{clust}}, \code{\link{addwpmfs}}, \code{browseVignettes("wsyn")}
}
\author{
Daniel Reuman, \email{reuman@ku.edu}
}
|
d5b29e510bc2433692823d5b9b38f9a0117cf042
|
4b0f029aa00f9d1d7499cc11c26c6898cc08e4d1
|
/R/validate-tactics.R
|
757a094c5eae267236568d9422e99ae07d2ea913
|
[
"Apache-2.0"
] |
permissive
|
Quinn-Yan/attckr
|
6d2d37eb5371f6c48b154706f718a71f9fb0316e
|
1fc2fd066fd54f4edfb380c716f2dc5b719f5bf6
|
refs/heads/master
| 2022-12-13T01:10:51.141220
| 2020-08-11T14:59:49
| 2020-08-11T14:59:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,714
|
r
|
validate-tactics.R
|
#' Validate Tactics strings against MITRE authoritative source
#'
#' @param tactics a character vector of tactic strings to validate. This will be
#'        converted to lower-case, left/right spaces will be trimmed and
#'        internal spaces will be converted to a single `-`
#' @param matrix which matrix to use when validating?
#' @param na_rm remove NA's before comparing?
#' @return Invisibly: `TRUE` if all tactics validate, otherwise the sorted
#'         unique invalid tactics (accompanied by a warning naming them).
#' @export
#' @examples
#' validate_tactics("persistence")
#' validate_tactics(c("persistence", "Persistence", "Persistance"))
validate_tactics <- function(tactics, matrix = c("enterprise", "mobile", "pre"),
                             na_rm = TRUE) {

  matrix <- match.arg(matrix[1], c("enterprise", "mobile", "pre"))

  # Map the user-facing matrix name onto its identifier in `tidy_attack`.
  switch(
    matrix,
    enterprise = "mitre-attack",
    mobile = "mitre-mobile-attack",
    pre = "mitre-pre-attack"
  ) -> tax

  tax <- unique(tidy_attack[tidy_attack$matrix == tax, "tactic", drop=TRUE])

  if (na_rm) {
    no_na <- na.exclude(tactics)
    where_nas <- attr(no_na, "na.action", exact = TRUE)
    if (length(where_nas)) message("Removed ", length(where_nas), " NA values.\n")
    # BUG FIX: the NA-filtered vector used to be assigned to a local (`tac`)
    # that was immediately overwritten below from the original, unfiltered
    # `tactics`, so NA removal had no effect. Carry the filtered values
    # forward instead.
    tactics <- as.character(no_na)
  }

  o_tac <- tactics
  tac <- normalize_identifier(tactics)

  # Keep the caller's original spellings for reporting.
  bad <- o_tac[which(!(tac %in% tax))]

  if (length(bad)) {

    warning(
      "Tactics not in the ", matrix, " MITRE ATT&CK matrix found\n",
      paste0(sprintf('- "%s"', sort(unique(bad))), collapse = "\n"),
      call. = FALSE
    )

    invisible(sort(unique(bad)))

  } else {

    message(
      "All tactics were found in the ", matrix, " MITRE ATT&CK matrix"
    )

    invisible(TRUE)

  }

}
|
3172f3eedfc70f040db81f50434d2c6a1d2670bf
|
a9c83b44c60b998a390c905163387aae9c0c0543
|
/src/main/scripts/R/dataManipulation.R
|
724c0c3d82c0ff4bb57c4cb64d16f011816988fa
|
[] |
no_license
|
MengsiLu/population-linkage-master
|
7c91e527deff32470fa25913e0f5f472904afcb5
|
77fa04f2ef1ab64af12e834cc46a3765d387ba71
|
refs/heads/master
| 2022-12-04T14:11:48.396656
| 2020-08-21T14:49:55
| 2020-08-21T14:49:55
| 289,294,051
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,052
|
r
|
dataManipulation.R
|
## Summarize a variable per group: count, mean, standard deviation, standard
## error of the mean, and a confidence interval (default 95%).
##   data: a data frame.
##   measurevar: name of the column to summarize.
##   groupvars: vector of grouping column names.
##   na.rm: drop NA values before summarizing?
##   conf.interval: confidence level for the interval (default .95).
summarySE <- function(data = NULL, measurevar, groupvars = NULL, na.rm = FALSE,
                      conf.interval = .95, .drop = TRUE) {
  library(plyr)

  # NA-aware count: with na.rm == TRUE, NA entries are not counted.
  count_obs <- function(v, na.rm = FALSE) {
    if (na.rm) {
      sum(!is.na(v))
    } else {
      length(v)
    }
  }

  # Per-group N, mean and sd of the measured column.
  summarized <- ddply(
    data, groupvars, .drop = .drop,
    .fun = function(chunk, col) {
      c(
        N = count_obs(chunk[[col]], na.rm = na.rm),
        mean = mean(chunk[[col]], na.rm = na.rm),
        sd = sd(chunk[[col]], na.rm = na.rm)
      )
    },
    measurevar
  )

  # Rename the generic "mean" column after the measured variable.
  summarized <- rename(summarized, c("mean" = measurevar))

  # Standard error of the mean.
  summarized$se <- summarized$sd / sqrt(summarized$N)

  # Confidence interval half-width: t quantile (df = N - 1) times the SE.
  t_mult <- qt(conf.interval / 2 + .5, summarized$N - 1)
  summarized$ci <- summarized$se * t_mult

  summarized
}
# Recall (sensitivity): fraction of actual positives that were detected.
#   tp: true positive count; fn: false negative count.
recall <- function(tp, fn) {
  tp / (tp + fn)
}
# Precision: fraction of predicted positives that are correct.
#   tp: true positive count; fp: false positive count.
precision <- function(tp, fp) {
  tp / (tp + fp)
}
# False positive rate: fraction of actual negatives incorrectly flagged.
#   tn: true negative count; fp: false positive count.
false_positive_rate <- function(tn, fp) {
  fp / (tn + fp)
}
# F1 measure: harmonic mean of precision and recall.
fmeasure <- function(precision, recall) {
  2 * (precision * recall) / (precision + recall)
}
# The Von Bertalanffy growth function: a * exp(k * x) + c.
# Vectorized in x; a, k and c are scalar parameters.
eval_vbt <- function(x, a, k, c) {
  a * exp(k * x) + c
}
|
79bb1f036e5c86bce822709bd294a5240eaedb4f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/lmomco/examples/parTLgld.Rd.R
|
4362f0bfdea4616c9e1e4a698ad66dc8d5d9f140
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,624
|
r
|
parTLgld.Rd.R
|
# Auto-generated example runner extracted from the lmomco package Rd file
# 'parTLgld.Rd'. The CPU-intensive examples are deliberately wrapped in
# '## Not run' comment blocks (see the note below); regenerate this file
# rather than editing it by hand.
library(lmomco)
### Name: parTLgld
### Title: Estimate the Parameters of the Generalized Lambda Distribution
### using Trimmed L-moments (t=1)
### Aliases: parTLgld
### Keywords: distribution (parameters) Distribution: Generalized Lambda
### ** Examples
# As of version 1.6.2, it is felt that in spirit of CRAN CPU
# reduction that the intensive operations of parTLgld() should
# be kept a bay.
## Not run:
##D X <- rgamma(202,2) # simulate a skewed distribution
##D lmr <- TLmoms(X, trim=1) # compute trimmed L-moments
##D PARgldTL <- parTLgld(lmr) # fit the GLD
##D
##D F <- pp(X) # plotting positions for graphing
##D plot(F,sort(X), col=8, cex=0.25)
##D lines(F, qlmomco(F,PARgldTL)) # show the best estimate
##D if(! is.null(PARgldTL$rest)) {
##D n <- length(PARgldTL$rest$xi)
##D other <- unlist(PARgldTL$rest[n,1:4]) # show alternative
##D lines(F, qlmomco(F,vec2par(other, type="gld")), col=2)
##D }
##D # Note in the extraction of other solutions that no testing for whether
##D # additional solutions were found is made. Also, it is quite possible
##D # that the other solutions "[n,1:4]" is effectively another numerical
##D # convergence on the primary solution. Some users of this example thus
##D # might not see two separate lines. Users are encouraged to inspect the
##D # rest of the solutions: print(PARgld$rest)
##D
##D # For one run of the above example, the GLD results follow
##D #print(PARgldTL)
##D #$type
##D #[1] "gld"
##D #$para
##D # xi alpha kappa h
##D # 1.02333964 -3.86037875 -0.06696388 -0.22100601
##D #$delTau5
##D #[1] -0.02299319
##D #$error
##D #[1] 7.048409e-08
##D #$source
##D #[1] "pargld"
##D #$rest
##D # xi alpha kappa h attempt delTau5 error
##D #1 1.020725 -3.897500 -0.06606563 -0.2195527 6 -0.02302222 1.333402e-08
##D #2 1.021203 -3.895334 -0.06616654 -0.2196020 4 -0.02304333 8.663930e-11
##D #3 1.020684 -3.904782 -0.06596204 -0.2192197 5 -0.02306065 3.908918e-09
##D #4 1.019795 -3.917609 -0.06565792 -0.2187232 2 -0.02307092 2.968498e-08
##D #5 1.023654 -3.883944 -0.06668986 -0.2198679 7 -0.02315035 2.991811e-07
##D #6 -4.707935 -5.044057 5.89280906 -0.3261837 13 0.04168800 2.229672e-10
## End(Not run)
## Not run:
##D F <- seq(.01,.99,.01)
##D plot(F,qlmomco(F, vec2par(c( 1.02333964, -3.86037875,
##D -0.06696388, -0.22100601), type="gld")),
##D type="l")
##D lines(F,qlmomco(F, vec2par(c(-4.707935, -5.044057,
##D 5.89280906, -0.3261837), type="gld")))
## End(Not run)
|
ee03355df6660dd70da3190204f1b0419672c434
|
54ead174cfc1d2f8e8246ec0f25c3ba026b5fe39
|
/man/satisfy.Rd
|
06caf5c9d0b6e2830ee0a896d6a360ac122ab64b
|
[
"MIT"
] |
permissive
|
edlee123/Ramble
|
ebe3398825265c1d68f86bacbb0ea84c2ea49178
|
1081ae736929551be201b48c0c4aa16f8df4d4ef
|
refs/heads/master
| 2020-12-30T15:54:39.942970
| 2017-04-06T11:57:18
| 2017-04-06T11:57:18
| 91,184,310
| 1
| 0
| null | 2017-05-13T15:35:45
| 2017-05-13T15:35:44
| null |
UTF-8
|
R
| false
| true
| 438
|
rd
|
satisfy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parser.R
\name{satisfy}
\alias{satisfy}
\title{\code{satisfy} is a function which allows us to make parsers that recognise single symbols.}
\usage{
satisfy(p)
}
\arguments{
\item{p}{is the predicate to determine if the arbitrary symbol is a member.}
}
\description{
\code{satisfy} is a function which allows us to make parsers that recognise single symbols.
}
|
71e0ec80c3efe9d260468f5fa7263f1f02427678
|
6b286ff42ae9135bcaeb1d8d537460f532ebab45
|
/R/citations.R
|
0f76f27d3ac36ee0107cc8bc53e5615af4ad5554
|
[] |
no_license
|
cran/move
|
6864db092eba41580170d4a09c5124758986b3ea
|
559c7a0ff40bd070373b82b43b880a862d4a33e2
|
refs/heads/master
| 2023-07-21T16:48:30.636533
| 2023-07-06T22:10:02
| 2023-07-06T22:10:02
| 17,697,651
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 502
|
r
|
citations.R
|
# Accessor generic for the citation slot of Move objects.
setGeneric("citations", function(obj) standardGeneric("citations"))

# Return the citation string stored on a .MoveGeneral object.
setMethod("citations", ".MoveGeneral", function(obj) {
  return(obj@citation)
})

# Replacement generic for the citation slot.
setGeneric("citations<-", function(obj, value) standardGeneric("citations<-"))

# Set the citation. Only a single citation is supported, so any additional
# elements are dropped with a warning (an empty `value` also triggers this
# branch and stores NA -- presumably tolerated upstream; confirm).
setReplaceMethod("citations", ".MoveGeneral", function(obj, value) {
  if (length(value) != 1) {
    value <- as.character(value[1])
    warning("There were more than one citation entered. Only using first element")
  }
  # Fix: use TRUE rather than the reassignable shorthand T when asking
  # slot<- to validate the assignment.
  slot(obj, "citation", check = TRUE) <- value
  obj
})
|
bed0951c6195915b6c02b53aaf3919333a85ab7d
|
1b5cb6a23dd47e9b0ff9171721e67251afe7dbc8
|
/cachematrix.R
|
f82c18c024a099d1dc21b649826a88b69290f6e2
|
[] |
no_license
|
narishman/ProgrammingAssignment2
|
a2bf312e4130c3634360b635922074aa66d0275d
|
54e26406dcf22dfdf58bffe7d4f9c6138b7bfbd6
|
refs/heads/master
| 2020-12-25T08:00:03.911712
| 2015-02-22T21:52:09
| 2015-02-22T21:52:09
| 31,178,833
| 0
| 0
| null | 2015-02-22T20:24:46
| 2015-02-22T20:24:44
| null |
UTF-8
|
R
| false
| false
| 1,580
|
r
|
cachematrix.R
|
# Construct a matrix wrapper that can cache its inverse, so the inverse is
# computed at most once for an unchanged matrix (see cacheSolve).
#
# Returns a list of four accessor closures sharing state through the
# enclosing environment:
#   set(y)           replace the stored matrix and drop any cached inverse
#   get()            return the stored matrix
#   setinvmatrix(i)  store a computed inverse
#   getinvmatrix()   return the cached inverse, or NULL if none is cached
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    # Invalidate the cache whenever the matrix changes.
    cached_inverse <<- NULL
  }
  get <- function() x
  setinvmatrix <- function(i) cached_inverse <<- i
  getinvmatrix <- function() cached_inverse
  list(
    set = set,
    get = get,
    setinvmatrix = setinvmatrix,
    getinvmatrix = getinvmatrix
  )
}
## Function : cacheSolve
## Return the inverse of the matrix held by a 'makeCacheMatrix' object,
## computing it at most once: if a cached inverse exists it is returned
## directly; otherwise the inverse is computed with solve(), cached on the
## object, and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinvmatrix()
  if (!is.null(cached)) {
    message("getting cached inverted matrix")
    return(cached)
  }
  inv <- solve(x$get())
  x$setinvmatrix(inv)
  inv
}
|
2f6ffe477cbedb638df7fa3d3d19484ab9bb6068
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/yhat/examples/commonalityCoefficients.Rd.R
|
8436e5e0c642a2e8f2124e8611dbd4ae5e840129
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 711
|
r
|
commonalityCoefficients.Rd.R
|
library(yhat)
### Name: commonalityCoefficients
### Title: Commonality Coefficents
### Aliases: commonalityCoefficients
### Keywords: models regression
### ** Examples
## Predict miles per gallon based on vehicle weight, type of
## carborator, & number of engine cylinders
commonalityCoefficients(mtcars,"mpg",list("wt","carb","cyl"))
## Predict paragraph comprehension based on four verbal
## tests: general info, sentence comprehension, word
## classification, & word type
## Use HS dataset in MBESS
require ("MBESS")
data(HS.data)
## Commonality Coefficient Analysis
commonalityCoefficients(HS.data,"paragrap",list("general",
"sentence","wordc","wordm"))
|
3a6e2b6221cdd32dbed005cb2e81b1e259178400
|
7ec3de26bcd47df4ed3f5aa227cd0dda0125e9ba
|
/PA1/corr.R
|
4d870290322ae86281858fc6194e92021e38528a
|
[] |
no_license
|
505515/RProgramming
|
ab02fe50a8d51255c0c56542eada1e23c5dfca58
|
da6ad0a02ef7c00be54f02c1db19d57ff69cca0c
|
refs/heads/master
| 2021-01-25T06:00:50.831567
| 2015-05-04T19:29:19
| 2015-05-04T19:29:19
| 33,884,156
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 521
|
r
|
corr.R
|
# Compute sulfate/nitrate correlations for monitor data files.
#
# Reads every CSV file in `directory`; for each file whose number of
# completely observed rows exceeds `threshold`, the Pearson correlation
# between its "sulfate" and "nitrate" columns is computed.
#
# Args:
#   directory: path to a directory of CSV files, each with "sulfate" and
#              "nitrate" columns.
#   threshold: minimum number of complete cases a file must exceed to be
#              included (default 0).
#
# Returns:
#   A numeric vector of correlations rounded to 4 digits; length 0 when no
#   file passes the threshold.
corr <- function(directory, threshold = 0) {
  files_list <- list.files(directory, full.names = TRUE)
  final <- numeric()
  # Fix: iterate over the files actually present instead of the hard-coded
  # 1:332, which errored when the directory held fewer than 332 files and
  # silently ignored any extras.
  for (path in files_list) {
    dat <- read.csv(path)
    dat_complete <- dat[complete.cases(dat), ]
    if (nrow(dat_complete) > threshold) {
      final <- c(final, cor(dat_complete[["sulfate"]], dat_complete[["nitrate"]]))
    }
  }
  round(final, digits = 4)
}
|
dbdc6542f20e450efb400721aa7bb8d62cfbdc22
|
3063fbc948249327a3f5c6a4e834bec8443eecb0
|
/20180726_PieChartTXNDecreased.R
|
3c24cc1069c196a9f63f6ff1e702c026bfa0f15e
|
[] |
no_license
|
olgabane/10XGenomics
|
98737a2791e483417c3f33c8839bfa7e52cb79f1
|
ed908f5bd3e289d40cdf7032d9953433863ededb
|
refs/heads/master
| 2022-05-26T03:46:49.774053
| 2018-08-23T19:00:06
| 2018-08-23T19:00:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 653
|
r
|
20180726_PieChartTXNDecreased.R
|
# Pie chart of downregulated (TXN-decreased) genes by functional category.
# 20180726_PieChart_TXNDecreased.R
# For poster -- July 26, 2018

# Gene counts per functional category.
n_genes <- c(8, 5, 3, 20, 13, 11, 23, 9, 41, 8, 72, 43, 62, 4, 16, 104, 45, 58)

# Category labels, in the same order as the counts above.
category <- c("ATP", "Calcium binding", "Cell adhesion", "Chromatin regulation",
              "Cytoskeleton and ECM", "GTPase and GPCR",
              "Kinases and phosphatases",
              "lncRNA",
              "Metabolic enzyme",
              "Mitochondrial proteins",
              "Ribosomal protein",
              "RNA binding and regulation",
              "Transcriptional regulator",
              "Synaptic protein",
              "Transmembrane and cell surface",
              "Unknown",
              "Protein regulator",
              "Other")

# Sort the slices by ascending count before plotting.
slices <- data.frame(category, n_genes)
slices <- slices[order(n_genes), ]

# Draw the chart on a 6 x 6 inch quartz device (macOS only).
quartz("a", 6, 6)
pie(slices$n_genes, slices$category, col = rainbow(length(slices$category)),
    cex = 0.7, radius = 0.7)
|
5eea1f266e90d629ebad19e0b527fc4dd897c67a
|
7485b5784c0a27a0dc480f87296493ea3fe557f9
|
/scripts/parciais/analise.r
|
ffe07c1d1cdf9184dd9922ccd3e3e8cc2cc04408
|
[] |
no_license
|
marciobarros/VisualNRP
|
041e5abe6153a036118b7c2482f7b1d592b4b934
|
cda23e9e173e1ecbb5a13ff415b80a445f90c2bb
|
refs/heads/master
| 2020-12-26T02:39:51.224688
| 2017-03-16T22:27:18
| 2017-03-16T22:27:18
| 53,698,777
| 1
| 0
| null | 2016-03-11T21:49:11
| 2016-03-11T21:49:11
| null |
UTF-8
|
R
| false
| false
| 798
|
r
|
analise.r
|
# Clear the workspace (NOTE: wipes every object in the global environment).
rm(list = ls())

# Base directory of the analysis (user specific).
# basedir <- "/Users/marcio"
# basedir <- "/Users/marcio.barros"
basedir <- "~"

# Read the greedy-search results from inside the zip archive.
zipfile <- paste(basedir, "/Desktop/Codigos/VisualNRP/results/analysis/resultados - bsgreedy.zip", sep = "")
raw_results <- read.table(unz(zipfile, "saida.txt"), sep = ";", header = FALSE)

library("data.table")
results_dt <- data.table(raw_results)

# Mean of V5 per V1/V2 pair (presumably algorithm and instance -- confirm),
# then spread so each V1 level becomes its own column.
means <- results_dt[, .(mean = mean(V5)), by = list(V1, V2)]
meanByAlgorithm <- reshape(means, v.names = "mean", idvar = c("V2"), timevar = "V1", direction = "wide")
# print(result, nrows=200)

# Standard deviation of V5 per V1/V2 pair, spread the same way.
sds <- results_dt[, .(sd = sd(V5)), by = list(V1, V2)]
sdByAlgorithm <- reshape(sds, v.names = "sd", idvar = c("V2"), timevar = "V1", direction = "wide")

# Maximum of V5 per V1/V2 pair, spread the same way.
maxs <- results_dt[, .(max = max(V5)), by = list(V1, V2)]
maxByAlgorithm <- reshape(maxs, v.names = "max", idvar = c("V2"), timevar = "V1", direction = "wide")
|
bc54edc8436fc92d286dac24a9306b1aa4266b76
|
0e8328adf2b9eb4af93f93d1e3bb209787bf5c07
|
/Calculation.R
|
8d9128d05327f59815d8561e4adffa97a5c89bb9
|
[] |
no_license
|
ganluannj/weights_Holm_procedure
|
e73d34bdec4009330179329b3d94419837776ea2
|
229e9ec0c6caa2ac387a54654f1ca984e09f3050
|
refs/heads/master
| 2022-06-09T23:29:46.316269
| 2020-05-06T21:46:24
| 2020-05-06T21:46:24
| 260,344,761
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,403
|
r
|
Calculation.R
|
# Operating characteristics of a two-stage group sequential test of two
# hypotheses with a weighted alpha split (w1 / w2 = 1 - w1), as used in a
# weighted Holm-type procedure. For every weight in `w1list`, two-stage
# rejection boundaries are obtained from gsDesign() and the probabilities
# of the rejection outcomes are computed from bivariate normal
# probabilities of the (interim, final) test statistics via pmvnorm().
#
# Arguments (assumed semantics -- confirm against the calling analysis):
#   w1list          weights for hypothesis 1; hypothesis 2 gets w2 = 1 - w1
#   mu1, mu2        means defining effect Delta1 = mu1 - mu2 (hypothesis 1)
#   mu3, mu4        means defining effect Delta2 = mu3 - mu4 (hypothesis 2)
#   sigma1..sigma4  standard deviations of the four groups
#   N               sample size used to standardize the effects
#   alpha           overall one-sided significance level
#   Method          spending function passed to gsDesign (e.g. 'OF')
#
# Returns a long-format data frame with columns w1, w2, variable,
# probability (one row per weight and outcome type).
Calculation<-function(w1list,mu1, mu2=0, mu3, mu4=0, sigma1=1, sigma2=1,
                      sigma3=1, sigma4=1, N, alpha=0.05, Method='OF')
{
  library(gsDesign)
  library(mvtnorm)
  LEN=length(w1list)
  # One row per weight; each Rej_* column will hold a probability.
  Result<-data.frame('w1'=w1list, 'w2'=1-w1list, 'Rej_both'=rep(0, LEN),
                     'Rej_1_only'=rep(0, LEN), 'Rej_2_only'=rep(0, LEN),
                     'No_rej'=rep(0, LEN),
                     'Rej_atleast_one'=rep(0, LEN),'Rej_1'=rep(0,LEN),
                     'Rej_2'=rep(0, LEN))
  # Effect sizes and pooled standard deviations for the two hypotheses.
  Delta1=mu1-mu2
  Delta2=mu3-mu4
  sigmatilde1=sqrt(sigma1^2+sigma2^2)
  sigmatilde2=sqrt(sigma3^2+sigma4^2)
  # Means of the (interim, final) standardized test statistics; the extra
  # sqrt(2) on the interim term is consistent with an interim analysis at
  # half the information -- confirm against the design.
  Mean1=c(sqrt(N)*Delta1/(sqrt(2)*sigmatilde1), sqrt(N)*Delta1/sigmatilde1)
  Mean2=c(sqrt(N)*Delta2/(sqrt(2)*sigmatilde2), sqrt(N)*Delta2/sigmatilde2)
  # Correlation matrix of the interim and final statistics (corr = sqrt(1/2)).
  V<-c(1, sqrt(1/2), sqrt(1/2),1)
  M<-matrix(V, nrow=2)
  # Unweighted two-stage boundary at the full alpha -- used below for the
  # hypothesis tested at full level after the other has been rejected.
  B<-gsDesign(k=2, alpha=alpha,test.type=1,sfu=Method)$upper$bound
  for (i in 1:LEN){
    w1=w1list[i]
    w2=1-w1
    # Weighted two-stage boundaries for each hypothesis.
    B1<-gsDesign(k=2, alpha=alpha*w1,test.type=1,sfu=Method)$upper$bound
    B2<-gsDesign(k=2, alpha=alpha*w2,test.type=1,sfu=Method)$upper$bound
    ## --- Reject at least one hypothesis -------------------------------
    ## probability of not rejecting hypothesis 1
    p1<-pmvnorm(lower=c(-Inf, -Inf), upper = B1, mean=Mean1, sigma=M)[[1]]
    ## probability of not rejecting hypothesis 2
    p2<-pmvnorm(lower=c(-Inf, -Inf), upper = B2, mean=Mean2, sigma=M)[[1]]
    ## probability of rejecting at least one
    patleast1<-1-p1*p2
    Result[i,'Rej_atleast_one']<-patleast1
    ## --- Reject only hypothesis 1 (A) ---------------------------------
    ## hypothesis 1 rejected at the interim; hypothesis 2 never crosses the
    ## full-alpha boundary B
    p1<-pmvnorm(lower=c(B1[1], -Inf), upper = c(Inf, Inf), mean=Mean1, sigma=M)[[1]]
    p2<-pmvnorm(lower=c(-Inf, -Inf), upper = B, mean=Mean2, sigma=M)[[1]]
    pmid<-p1*p2
    ## hypothesis 1 rejected only at the final analysis; hypothesis 2 stays
    ## below its weighted interim bound and the full-alpha final bound
    p1<-pmvnorm(lower=c(-Inf, B1[2]), upper = c(B1[1], Inf), mean=Mean1, sigma=M)[[1]]
    p2<-pmvnorm(lower=c(-Inf, -Inf), upper = c(B2[1], B[2]), mean=Mean2, sigma=M)[[1]]
    pfinal<-p1*p2
    ponlyA<-pmid+pfinal
    Result[i,'Rej_1_only']<-ponlyA
    ## --- Reject only hypothesis 2 (B): mirror image of case A ---------
    ## hypothesis 2 rejected at the interim; hypothesis 1 never crosses B
    p1<-pmvnorm(lower=c(-Inf, -Inf), upper = B, mean=Mean1, sigma=M)[[1]]
    p2<-pmvnorm(lower=c(B2[1], -Inf), upper = c(Inf, Inf), mean=Mean2, sigma=M)[[1]]
    pmid<-p1*p2
    ## hypothesis 2 rejected only at the final analysis
    p1<-pmvnorm(lower=c(-Inf, -Inf), upper = c(B1[1], B[2]), mean=Mean1, sigma=M)[[1]]
    p2<-pmvnorm(lower=c(-Inf, B2[2]), upper = c(B2[1], Inf), mean=Mean2, sigma=M)[[1]]
    pfinal<-p1*p2
    ponlyB<-pmid+pfinal
    Result[i,'Rej_2_only']<-ponlyB
    #################################
    # Remaining outcome probabilities follow by complementarity.
    Rejboth<-patleast1-ponlyA-ponlyB
    Rej1<-ponlyA+Rejboth
    Rej2<-ponlyB+Rejboth
    Result[i,'Rej_both']<-Rejboth
    Result[i,'Rej_1']<-Rej1
    Result[i,'Rej_2']<-Rej2
    Result[i, 'No_rej']<-1-patleast1
  }
  # reshape Result to long format for plotting
  # NOTE(review): gather() is superseded; consider tidyr::pivot_longer().
  library("tidyverse")
  Result<-Result %>%
    gather(key = "variable", value = "probability", -w1, -w2)
  return(Result)
}
# Example driver: compute rejection probabilities for a small weight grid
# and plot two of the resulting probability curves against w1.
# Full grid: w1list <- c(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)
w1list <- c(0.1, 0.2)
Result <- Calculation(w1list = w1list, mu1 = 0.2, mu2 = 0, mu3 = 0.2, mu4 = 0,
                      sigma1 = 1, sigma2 = 1, sigma3 = 1, sigma4 = 1,
                      N = 150, alpha = 0.05, Method = 'OF')

library(ggplot2)

# Select the curves to plot, e.g. probability of rejecting both hypotheses
# and probability of rejecting at least one.
# Alternative pair: plot_data <- Result[Result$variable %in% c('Rej_1', 'Rej_2'), ]
plot_data <- Result[Result$variable %in% c('Rej_both', 'Rej_atleast_one'), ]

p <- ggplot(plot_data, aes(x = w1, y = probability)) +
  geom_line(aes(colour = variable), size = 1.2) +
  geom_point(aes(colour = variable), size = 3) +
  ggtitle("Probability vs weight A") +
  theme(plot.title = element_text(hjust = 0.5),
        legend.text = element_text(size = 12),
        legend.position = c(0.5, 0.1),
        legend.direction = "vertical",
        axis.text = element_text(size = 14),
        axis.title = element_text(size = 14)) +
  theme(legend.title = element_blank()) +
  theme(axis.title.x = element_text(size = 14)) +
  xlab('Weight A') +
  theme(axis.title.y = element_text(size = 14)) +
  ylim(0, 1) +
  scale_color_discrete(labels = c("Reject at least one hypothesis",
                                  "Reject both hypotheses"))
# ggsave(filename = paste0('mu1_0.2_mu3_0.2_p1.png'),
#        plot = p, width = 4, height = 6, units = 'in')
p
|
620637b0f3b54c22e92af692efd4f097efdb6c8b
|
e56247c094ad626694e2d187930f774362616d2d
|
/R/state.R
|
5e53fd7b0be7c3ed7b9b56dcff7ec5dda828c067
|
[] |
no_license
|
pmur002/rdataviewer
|
2996fc981e84d77f4f205e2a0162bdd25675317a
|
31459cf81ae9b28a86f9d18e3c0f09b26d46f011
|
refs/heads/master
| 2021-01-02T09:15:27.076865
| 2011-11-29T22:34:55
| 2011-11-29T22:34:55
| 32,362,192
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,340
|
r
|
state.R
|
# ViewerStateSimple: a plain S4 value object holding the viewer's layout
# state (reading directions and font size). An alternative design could use
# a closure/environment to avoid copying on each update; this one relies on
# ordinary S4 copy semantics.
setClass("ViewerStateSimple",
         representation(lrmode="character",
                        udmode="character",
                        fontsize="numeric"),
         # Defaults: left-to-right, top-to-bottom reading at size 10
         # (presumably points -- confirm against the rendering code).
         prototype(lrmode="left-to-right",
                   udmode="top-to-bottom",
                   fontsize=10),
         contains="ViewerState")
# Construct a ViewerStateSimple with the default layout settings.
viewerState <- function() {
  new("ViewerStateSimple")
}
# Getter: horizontal reading direction.
setMethod("lrmode", signature(state="ViewerStateSimple"),
          function(state) {
            state@lrmode
          })
# Getter: vertical reading direction.
setMethod("udmode", signature(state="ViewerStateSimple"),
          function(state) {
            state@udmode
          })
# Getter: font size.
setMethod("fontsize", signature(state="ViewerStateSimple"),
          function(state) {
            state@fontsize
          })
# Setter: horizontal reading direction; returns the modified copy.
setMethod("lrmode<-", signature(state="ViewerStateSimple"),
          function(state, value) {
            state@lrmode <- value
            state
          })
# Setter: vertical reading direction; returns the modified copy.
setMethod("udmode<-", signature(state="ViewerStateSimple"),
          function(state, value) {
            state@udmode <- value
            state
          })
# Setter: font size; returns the modified copy.
setMethod("fontsize<-", signature(state="ViewerStateSimple"),
          function(state, value) {
            state@fontsize <- value
            state
          })
|
2c2935981c18cb6399f7aef5c44c7e95e1c0e852
|
119b181488acae0e7d49a5d35ee7decf527ebe44
|
/man/removeMissingSectors.Rd
|
e28ab353e1682aadff649396d4f57fb6fdd2e43d
|
[
"MIT"
] |
permissive
|
USEPA/useeior
|
0d46f1ca9ca1756e1760b153be620a234fddda03
|
169ae5a16c4e367a3c39ceabff3c85f0b4e187a1
|
refs/heads/master
| 2023-08-06T19:03:28.121338
| 2023-07-14T18:39:13
| 2023-07-14T18:39:13
| 221,473,707
| 30
| 24
|
MIT
| 2023-09-06T15:47:55
| 2019-11-13T14:07:05
|
R
|
UTF-8
|
R
| false
| true
| 517
|
rd
|
removeMissingSectors.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SatelliteFunctions.R
\name{removeMissingSectors}
\alias{removeMissingSectors}
\title{Removes flow data where sectors are NA after mapping. Should only be used after checkSatelliteFlowLoss}
\usage{
removeMissingSectors(tbs)
}
\arguments{
\item{tbs, }{totals-by-sector df in model schema}
}
\value{
df, the modified tbs
}
\description{
Removes flow data where sectors are NA after mapping. Should only be used after checkSatelliteFlowLoss
}
|
05a1fee9c61f0581a727816e664445d10780bd3a
|
ee45d5d568f9911ae50f049a0d4f2408677fa8ec
|
/man/Attributes.Rd
|
8f4104f7d8f69cf7bfa1493b072c319e9e3097eb
|
[] |
no_license
|
cran/MullerPlot
|
b0fe7d9f2e1e6193dfce72bb77b81c36f21346b1
|
9df71feb53ecb8163452ed727f7fa19aa5c92459
|
refs/heads/master
| 2022-05-17T20:42:45.055716
| 2022-04-27T10:40:02
| 2022-04-27T10:40:02
| 71,505,134
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 654
|
rd
|
Attributes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{Attributes}
\alias{Attributes}
\title{Attributes of OTUs}
\format{
A matrix with 3 columns and 8 rows.
}
\usage{
data(Attributes)
}
\description{
A matrix with 3 columns and 8 rows.
}
\details{
The first column contains OTU names:
"RBL636","rkd D5891 bcc","ylnC-48","iglK f12","BAeR G11","nuhj-25","HwrK-41","QecF*22"
The second column contains parents of the corresponding OTUs:
NA ,"RBL636","RBL636","ylnC-48","rkd D5891 bc","iglK f12","rkd D5891 bc","nuhj-25"
and the third column contains colors of corresponding OTUs.
}
\keyword{datasets}
|
75cf9d52c7040d01628986f240ab0b2a1a8f7865
|
f2e83e4afb99d5779e11702f518cb76c645c9982
|
/man/block.map.matrix.Rd
|
196c5dbf801c556ed549efab39d2980a83ad4e8f
|
[] |
no_license
|
cran/ldlasso
|
c9503fd9a65fb3488f1ebd2d2caca9a4c30dba28
|
ed253e1f1271d7e0173e1ef733effeff281112b1
|
refs/heads/master
| 2020-06-06T05:12:21.935945
| 2013-01-02T00:00:00
| 2013-01-02T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 812
|
rd
|
block.map.matrix.Rd
|
\name{block.map.matrix}
\alias{block.map.matrix}
\title{ Creates an indicator matrix for haplotype block boundaries, for
use in ld_lasso.
}
\description{
Simple function that maps the block boundary vector to an indicator
matrix for use in the definition of constraint matrix. This matrix
ensures that only within block SNP pairs are considered.
}
\usage{
block.map.matrix(block.cood)
}
\arguments{
\item{block.cood}{
A vector of length p+1, where p is the number of SNPs. block.cood is an indicator vector
that indicates block boundaries at all p+1 SNP bounded intervals. Use
find.bounds to create this vector.
}
}
\value{
A matrix of logical variables. If the (i,j) entry is TRUE then SNP i
and SNP j are in the same haplotype block.
}
\author{
Samuel G. Younkin
}
\seealso{
ld_lasso
}
|
8e98dd95d9d92a63a2cd1fb834219adc0a8ea61f
|
ab00bc7e17121d2dcf3741dc9f650a4e76ed4a44
|
/tests/testthat/test-mutate.R
|
b631efa6ba6ab2766e01596d9c20155ac4b02e83
|
[
"MIT"
] |
permissive
|
tidyverse/dplyr
|
9b7fdc07e6a70bc8e802094e2e2a127af22bcc02
|
cf8031d00f406c6dc5d483d7e9e34639df797b81
|
refs/heads/main
| 2023-09-01T03:52:50.608019
| 2023-08-25T13:42:29
| 2023-08-25T13:42:29
| 6,427,813
| 3,290
| 1,982
|
NOASSERTION
| 2023-09-09T20:14:25
| 2012-10-28T13:39:17
|
R
|
UTF-8
|
R
| false
| false
| 25,145
|
r
|
test-mutate.R
|
test_that("empty mutate returns input", {
  # mutate() with no expressions must be the identity, with and without
  # grouping, whether the emptiness comes from zero arguments or from
  # splicing an empty list.
  plain <- tibble(x = 1)
  grouped <- group_by(plain, x)

  expect_equal(mutate(plain), plain)
  expect_equal(mutate(plain, .by = x), plain)
  expect_equal(mutate(grouped), grouped)

  expect_equal(mutate(plain, !!!list()), plain)
  expect_equal(mutate(plain, !!!list(), .by = x), plain)
  expect_equal(mutate(grouped, !!!list()), grouped)
})
test_that("rownames preserved", {
df <- data.frame(x = c(1, 2), row.names = c("a", "b"))
df <- mutate(df, y = 2)
expect_equal(row.names(df), c("a", "b"))
df <- mutate(df, y = 2, .by = x)
expect_equal(row.names(df), c("a", "b"))
})
test_that("mutations applied progressively", {
df <- tibble(x = 1)
expect_equal(df %>% mutate(y = x + 1, z = y + 1), tibble(x = 1, y = 2, z = 3))
expect_equal(df %>% mutate(x = x + 1, x = x + 1), tibble(x = 3))
expect_equal(df %>% mutate(x = 2, y = x), tibble(x = 2, y = 2))
df <- data.frame(x = 1, y = 2)
expect_equal(
df %>% mutate(x2 = x, x3 = x2 + 1),
df %>% mutate(x2 = x + 0, x3 = x2 + 1)
)
})
test_that("length-1 vectors are recycled (#152)", {
df <- tibble(x = 1:4)
expect_equal(mutate(df, y = 1)$y, rep(1, 4))
expect_error(mutate(df, y = 1:2))
})
test_that("can remove variables with NULL (#462)", {
  tbl <- tibble(x = 1:3, y = 1:3)
  gtbl <- group_by(tbl, x)

  expect_equal(mutate(tbl, y = NULL), tbl[1])
  expect_equal(mutate(gtbl, y = NULL), gtbl[1])

  # removing a column that never existed is a no-op
  expect_equal(mutate(tbl, z = NULL), tbl)
  # as is removing a column created earlier in the same call
  expect_equal(mutate(tbl, z = 1, z = NULL), tbl)

  # regression test for https://github.com/tidyverse/dplyr/issues/4974
  expect_equal(
    mutate(data.frame(x = 1, y = 1), z = 1, x = NULL, y = NULL),
    data.frame(z = 1)
  )
})
test_that("mutate() names pronouns correctly (#2686)", {
expect_named(mutate(tibble(x = 1), .data$x), "x")
expect_named(mutate(tibble(x = 1), .data[["x"]]), "x")
})
test_that("mutate() supports unquoted values", {
df <- tibble(g = c(1, 1, 2, 2, 2), x = 1:5)
expect_identical(mutate(df, out = !!1), mutate(df, out = 1))
expect_identical(mutate(df, out = !!(1:5)), mutate(df, out = 1:5))
expect_identical(mutate(df, out = !!quote(1:5)), mutate(df, out = 1:5))
gdf <- group_by(df, g)
expect_identical(mutate(gdf, out = !!1), mutate(gdf, out = 1))
})
test_that("assignments don't overwrite variables (#315)", {
df <- tibble(x = 1, y = 2)
out <- df %>% mutate(z = {x <- 10; x})
expect_equal(out, tibble(x = 1, y = 2, z = 10))
})
test_that("can mutate a data frame with zero columns", {
df <- new_data_frame(n = 2L)
expect_equal(mutate(df, x = 1), data.frame(x = c(1, 1)))
})
test_that("mutate() handles symbol expressions", {
df <- tibble(x = structure(1, class = "alien"))
res <- mutate(df, y = x)
expect_identical(df$x, res$y)
gf <- group_by(df, x)
res <- mutate(df, y = x)
expect_identical(df$x, res$y)
})
test_that("mutate() supports constants (#6056, #6305)", {
df <- data.frame(x = 1:10, g = rep(1:2, each = 5))
y <- 1:10
z <- 1:5
expect_identical(df %>% mutate(y = !!y) %>% pull(y), y)
expect_identical(df %>% group_by(g) %>% mutate(y = !!y) %>% pull(y), y)
expect_identical(df %>% rowwise() %>% mutate(y = !!y) %>% pull(y), y)
expect_snapshot({
(expect_error(df %>% mutate(z = !!z)))
(expect_error(df %>% group_by(g) %>% mutate(z = !!z)))
(expect_error(df %>% rowwise() %>% mutate(z = !!z)))
})
# `.env$` is used for per group evaluation
expect_identical(df %>% mutate(y = .env$y) %>% pull(y), y)
expect_identical(df %>% group_by(g) %>% mutate(z = .env$z) %>% pull(z), c(z, z))
expect_snapshot({
(expect_error(df %>% group_by(g) %>% mutate(y = .env$y)))
(expect_error(df %>% rowwise() %>% mutate(y = .env$y)))
})
})
test_that("can't overwrite column active bindings (#6666)", {
skip_if(getRversion() < "3.6.3", message = "Active binding error changed")
df <- tibble(g = 1:2, x = 3:4)
gdf <- group_by(df, g)
# The error seen here comes from trying to `<-` to an active binding when
# the active binding function has 0 arguments.
expect_snapshot(error = TRUE, {
mutate(df, y = {
x <<- 2
x
})
})
expect_snapshot(error = TRUE, {
mutate(df, .by = g, y = {
x <<- 2
x
})
})
expect_snapshot(error = TRUE, {
mutate(gdf, y = {
x <<- 2
x
})
})
})
test_that("assigning with `<-` doesn't affect the mask (#6666)", {
df <- tibble(g = 1:2, x = 3:4)
gdf <- group_by(df, g)
out <- mutate(df, .by = g, y = {
x <- x + 2L
x
})
expect_identical(out$x, c(3L, 4L))
expect_identical(out$y, c(5L, 6L))
out <- mutate(gdf, y = {
x <- x + 2L
x
})
expect_identical(out$x, c(3L, 4L))
expect_identical(out$y, c(5L, 6L))
})
test_that("`across()` inline expansions that use `<-` don't affect the mask (#6666)", {
df <- tibble(g = 1:2, x = 3:4)
out <- df %>%
mutate(
across(x, function(col) {
col <- col + 2L
col
}),
.by = g
)
expect_identical(out$x, c(5L, 6L))
})
test_that("can't share local variables across expressions (#6666)", {
df <- tibble(x = 1:2, y = 3:4)
expect_snapshot(error = TRUE, {
mutate(
df,
x2 = {
foo <- x
x
},
y2 = {
foo
}
)
})
})
# column types ------------------------------------------------------------
test_that("glue() is supported", {
expect_equal(
tibble(x = 1) %>% mutate(y = glue("")),
tibble(x = 1, y = glue(""))
)
})
test_that("mutate disambiguates NA and NaN (#1448)", {
df <- tibble(x = c(1, NA, NaN))
out <- mutate(df, y = x * 1)
expect_equal(out$y, df$x)
})
test_that("mutate preserves names (#1689, #2675)", {
df <- tibble(a = 1:3)
out1 <- df %>% mutate(b = setNames(1:3, letters[1:3]))
out2 <- df %>% mutate(b = setNames(as.list(1:3), letters[1:3]))
expect_named(out1$b, letters[1:3])
expect_named(out2$b, letters[1:3])
})
test_that("mutate handles matrix columns", {
df <- data.frame(a = rep(1:3, each = 2), b = 1:6)
df_regular <- mutate(df, b = scale(b))
df_grouped <- mutate(group_by(df, a), b = scale(b))
df_rowwise <- mutate(rowwise(df), b = scale(b))
expect_equal(dim(df_regular$b), c(6, 1))
expect_equal(dim(df_grouped$b), c(6, 1))
expect_equal(dim(df_rowwise$b), c(6, 1))
})
test_that("mutate handles data frame columns", {
df <- data.frame("a" = c(1, 2, 3), "b" = c(2, 3, 4), "base_col" = c(3, 4, 5))
res <- mutate(df, new_col = data.frame(x = 1:3))
expect_equal(res$new_col, data.frame(x = 1:3))
res <- mutate(group_by(df, a), new_col = data.frame(x = a))
expect_equal(res$new_col, data.frame(x = 1:3))
res <- mutate(rowwise(df), new_col = data.frame(x = a))
expect_equal(res$new_col, data.frame(x = 1:3))
})
test_that("unnamed data frames are automatically unspliced (#2326, #3630)", {
expect_identical(
tibble(a = 1) %>% mutate(tibble(b = 2)),
tibble(a = 1, b = 2)
)
expect_identical(
tibble(a = 1) %>% mutate(tibble(b = 2), tibble(b = 3)),
tibble(a = 1, b = 3)
)
expect_identical(
tibble(a = 1) %>% mutate(tibble(b = 2), c = b),
tibble(a = 1, b = 2, c = 2)
)
})
test_that("named data frames are packed (#2326, #3630)", {
df <- tibble(x = 1)
out <- df %>% mutate(y = tibble(a = x))
expect_equal(out, tibble(x = 1, y = tibble(a = 1)))
})
test_that("unchop only called for when multiple groups", {
df <- data.frame(g = 1, x = 1:5)
out <- mutate(df, x = ts(x, start = c(1971, 1), frequency = 52))
expect_s3_class(out$x, "ts")
gdf <- group_by(df, g)
out <- mutate(gdf, x = ts(x, start = c(1971, 1), frequency = 52))
expect_s3_class(out$x, "ts")
})
# output types ------------------------------------------------------------
test_that("mutate preserves grouping", {
gf <- group_by(tibble(x = 1:2, y = 2), x)
i <- count_regroups(out <- mutate(gf, x = 1))
expect_equal(i, 1L)
expect_equal(group_vars(out), "x")
expect_equal(nrow(group_data(out)), 1)
i <- count_regroups(out <- mutate(gf, z = 1))
expect_equal(i, 0)
expect_equal(group_data(out), group_data(gf))
})
test_that("mutate works on zero-row grouped data frame (#596)", {
dat <- data.frame(a = numeric(0), b = character(0), stringsAsFactors = TRUE)
res <- dat %>% group_by(b, .drop = FALSE) %>% mutate(a2 = a * 2)
expect_type(res$a2, "double")
expect_s3_class(res, "grouped_df")
expect_equal(res$a2, numeric(0))
expect_type(group_rows(res), "list")
expect_equal(attr(group_rows(res), "ptype"), integer())
expect_equal(group_data(res)$b, factor(character(0)))
})
test_that("mutate preserves class of zero-row rowwise (#4224, #6303)", {
# Each case needs to test both x and identity(x) because these flow
# through two slightly different pathways.
rf <- rowwise(tibble(x = character(0)))
out <- mutate(rf, x2 = identity(x), x3 = x)
expect_equal(out$x2, character())
expect_equal(out$x3, character())
# including list-of classes of list-cols where possible
rf <- rowwise(tibble(x = list_of(.ptype = character())))
out <- mutate(rf, x2 = identity(x), x3 = x)
expect_equal(out$x2, character())
expect_equal(out$x3, character())
# an empty list is turns into a logical (aka unspecified)
rf <- rowwise(tibble(x = list()))
out <- mutate(rf, x2 = identity(x), x3 = x)
expect_equal(out$x2, logical())
expect_equal(out$x3, logical())
})
test_that("mutate works on empty data frames (#1142)", {
df <- data.frame()
res <- df %>% mutate()
expect_equal(nrow(res), 0L)
expect_equal(length(res), 0L)
res <- df %>% mutate(x = numeric())
expect_equal(names(res), "x")
expect_equal(nrow(res), 0L)
expect_equal(length(res), 1L)
})
test_that("mutate handles 0 rows rowwise (#1300)", {
res <- tibble(y = character()) %>% rowwise() %>% mutate(z = 1)
expect_equal(nrow(res), 0L)
})
test_that("rowwise mutate gives expected results (#1381)", {
f <- function(x) ifelse(x < 2, NA_real_, x)
res <- tibble(x = 1:3) %>% rowwise() %>% mutate(y = f(x))
expect_equal(res$y, c(NA, 2, 3))
})
test_that("rowwise mutate un-lists existing size-1 list-columns (#6302)", {
# Existing column
rf <- rowwise(tibble(x = as.list(1:3)))
out <- mutate(rf, y = x)
expect_equal(out$y, 1:3)
# New column
rf <- rowwise(tibble(x = 1:3))
out <- mutate(rf, y = list(1), z = y)
expect_identical(out$z, c(1, 1, 1))
# Column of data 1-row data frames
rf <- rowwise(tibble(x = list(tibble(a = 1), tibble(a = 2))))
out <- mutate(rf, y = x)
expect_identical(out$y, tibble(a = c(1, 2)))
# Preserves known list-of type
rf <- rowwise(tibble(x = list_of(.ptype = character())))
out <- mutate(rf, y = x)
expect_identical(out$y, character())
# Errors if it's not a length-1 list
df <- rowwise(tibble(x = list(1, 2:3)))
expect_snapshot(mutate(df, y = x), error = TRUE)
})
test_that("grouped mutate does not drop grouping attributes (#1020)", {
d <- data.frame(subject = c("Jack", "Jill"), id = c(2, 1)) %>% group_by(subject)
a1 <- names(attributes(d))
a2 <- names(attributes(d %>% mutate(foo = 1)))
expect_equal(setdiff(a1, a2), character(0))
})
test_that("mutate() hands list columns with rowwise magic to follow up expressions (#4845)", {
test <- rowwise(tibble(x = 1:2))
expect_identical(
test %>%
mutate(a = list(1)) %>%
mutate(b = list(a + 1)),
test %>%
mutate(a = list(1), b = list(a + 1))
)
})
test_that("mutate keeps zero length groups", {
df <- tibble(
e = 1,
f = factor(c(1, 1, 2, 2), levels = 1:3),
g = c(1, 1, 2, 2),
x = c(1, 2, 1, 4)
)
df <- group_by(df, e, f, g, .drop = FALSE)
expect_equal( group_size(mutate(df, z = 2)), c(2, 2, 0) )
})
# other -------------------------------------------------------------------
test_that("no utf8 invasion (#722)", {
skip_if_not(l10n_info()$"UTF-8")
skip_if_not_installed("lobstr")
source("utf-8.txt", local = TRUE, encoding = "UTF-8")
})
test_that("mutate() to UTF-8 column names", {
df <- tibble(a = 1) %>% mutate("\u5e78" := a)
expect_equal(colnames(df), c("a", "\u5e78"))
})
test_that("Non-ascii column names in version 0.3 are not duplicated (#636)", {
local_non_utf8_encoding()
df <- tibble(a = "1", b = "2")
names(df) <- c("a", enc2native("\u4e2d"))
res <- df %>% mutate_all(as.numeric)
expect_equal(names(res), as_utf8_character(names(df)))
})
test_that("mutate coerces results from one group with all NA values (#1463) ", {
df <- tibble(x = c(1, 2), y = c(1, NA))
res <- df %>% group_by(x) %>% mutate(z = ifelse(y > 1, 1, 2))
expect_true(is.na(res$z[2]))
expect_type(res$z, "double")
})
test_that("grouped subsets are not lazy (#3360)", {
make_call <- function(x) {
quo(!!x)
}
res <- tibble(name = 1:2, value = letters[1:2]) %>%
rowwise() %>%
mutate(call = list(make_call(value))) %>%
pull()
expect_identical(res, list(make_call("a"), make_call("b")))
res <- tibble(name = 1:2, value = letters[1:2]) %>%
group_by(name) %>%
mutate(call = list(make_call(value))) %>%
pull()
expect_identical(res, list(make_call("a"), make_call("b")))
})
test_that("mutate() evaluates expression for empty groups", {
df <- tibble(f = factor(c("a", "b"), levels = c("a", "b", "c")))
gf <- group_by(df, f, .drop = FALSE)
count <- 0
mutate(gf, x = {count <<- count + 1})
expect_equal(count, 3L)
})
test_that("DataMask$add() forces chunks (#4677)", {
df <- tibble(bf10 = 0.244) %>%
mutate(
bf01 = 1 / bf10,
log_e_bf10 = log(bf10),
log_e_bf01 = log(bf01)
)
expect_equal(df$log_e_bf01, log(1 / 0.244))
})
test_that("DataMask uses fresh copies of group id / size variables (#6762)", {
df <- tibble(x = 1:2)
fn <- function() {
df <- tibble(a = 1)
# Otherwise, this nested `mutate()` can modify the same
# id/size variable as the outer one, which causes havoc
mutate(df, b = a + 1)
}
out <- mutate(df, y = {fn(); x})
expect_identical(out$x, 1:2)
expect_identical(out$y, 1:2)
})
test_that("mutate() correctly auto-names expressions (#6741)", {
df <- tibble(a = 1L)
expect_identical(mutate(df, -a), tibble(a = 1L, "-a" = -1L))
foo <- "foobar"
expect_identical(mutate(df, foo), tibble(a = 1L, foo = "foobar"))
a <- 2L
expect_identical(mutate(df, a), tibble(a = 1L))
df <- tibble(a = 1L, "a + 1" = 5L)
a <- 2L
expect_identical(mutate(df, a + 1), tibble(a = 1L, "a + 1" = 2))
})
# .by -------------------------------------------------------------------------
test_that("can group transiently using `.by`", {
df <- tibble(g = c(1, 1, 2, 1, 2), x = c(5, 2, 1, 2, 3))
out <- mutate(df, x = mean(x), .by = g)
expect_identical(out$g, df$g)
expect_identical(out$x, c(3, 3, 2, 3, 2))
expect_s3_class(out, class(df), exact = TRUE)
})
test_that("transient grouping retains bare data.frame class", {
df <- data.frame(g = c(1, 1, 2, 1, 2), x = c(5, 2, 1, 2, 3))
out <- mutate(df, x = mean(x), .by = g)
expect_s3_class(out, class(df), exact = TRUE)
})
test_that("transient grouping retains data frame attributes (#6100)", {
# With data.frames or tibbles
df <- data.frame(g = c(1, 1, 2), x = c(1, 2, 1))
tbl <- as_tibble(df)
attr(df, "foo") <- "bar"
attr(tbl, "foo") <- "bar"
out <- mutate(df, x = mean(x), .by = g)
expect_identical(attr(out, "foo"), "bar")
out <- mutate(tbl, x = mean(x), .by = g)
expect_identical(attr(out, "foo"), "bar")
})
test_that("can `NULL` out the `.by` column", {
df <- tibble(x = 1:3)
expect_identical(
mutate(df, x = NULL, .by = x),
new_tibble(list(), nrow = 3)
)
})
test_that("catches `.by` with grouped-df", {
df <- tibble(x = 1)
gdf <- group_by(df, x)
expect_snapshot(error = TRUE, {
mutate(gdf, .by = x)
})
})
test_that("catches `.by` with rowwise-df", {
df <- tibble(x = 1)
rdf <- rowwise(df)
expect_snapshot(error = TRUE, {
mutate(rdf, .by = x)
})
})
# .before, .after, .keep ------------------------------------------------------
test_that(".keep = 'unused' keeps variables explicitly mentioned", {
df <- tibble(x = 1, y = 2)
out <- mutate(df, x1 = x + 1, y = y, .keep = "unused")
expect_named(out, c("y", "x1"))
})
test_that(".keep = 'used' not affected by across() or pick()", {
df <- tibble(x = 1, y = 2, z = 3, a = "a", b = "b", c = "c")
# This must evaluate every column in order to figure out if should
# be included in the set or not, but that shouldn't be counted for
# the purposes of "used" variables
out <- mutate(df, across(where(is.numeric), identity), .keep = "unused")
expect_named(out, names(df))
out <- mutate(df, pick(where(is.numeric)), .keep = "unused")
expect_named(out, names(df))
})
test_that(".keep = 'used' keeps variables used in expressions", {
df <- tibble(a = 1, b = 2, c = 3, x = 1, y = 2)
out <- mutate(df, xy = x + y, .keep = "used")
expect_named(out, c("x", "y", "xy"))
})
test_that(".keep = 'none' only keeps grouping variables", {
df <- tibble(x = 1, y = 2)
gf <- group_by(df, x)
expect_named(mutate(df, z = 1, .keep = "none"), "z")
expect_named(mutate(gf, z = 1, .keep = "none"), c("x", "z"))
})
test_that(".keep = 'none' retains original ordering (#5967)", {
df <- tibble(x = 1, y = 2)
expect_named(df %>% mutate(y = 1, x = 2, .keep = "none"), c("x", "y"))
# even when grouped
gf <- group_by(df, x)
expect_named(gf %>% mutate(y = 1, x = 2, .keep = "none"), c("x", "y"))
})
test_that("can use .before and .after to control column position", {
df <- tibble(x = 1, y = 2)
expect_named(mutate(df, z = 1), c("x", "y", "z"))
expect_named(mutate(df, z = 1, .before = 1), c("z", "x", "y"))
expect_named(mutate(df, z = 1, .after = 1), c("x", "z", "y"))
# but doesn't affect order of existing columns
df <- tibble(x = 1, y = 2)
expect_named(mutate(df, x = 1, .after = y), c("x", "y"))
})
test_that("attributes of bare data frames are retained when `.before` and `.after` are used (#6341)", {
# We require `[` methods to be in charge of keeping extra attributes for all
# data frame subclasses (except for data.tables)
df <- vctrs::data_frame(x = 1, y = 2)
attr(df, "foo") <- "bar"
out <- mutate(df, z = 3, .before = x)
expect_identical(attr(out, "foo"), "bar")
})
test_that(".keep and .before/.after interact correctly", {
df <- tibble(x = 1, y = 1, z = 1, a = 1, b = 2, c = 3) %>%
group_by(a, b)
expect_named(mutate(df, d = 1, x = 2, .keep = "none"), c("x", "a", "b", "d"))
expect_named(mutate(df, d = 1, x = 2, .keep = "none", .before = "a"), c("x", "d", "a", "b"))
expect_named(mutate(df, d = 1, x = 2, .keep = "none", .after = "a"), c("x", "a", "d", "b"))
})
test_that("dropping column with `NULL` then readding it retains original location", {
df <- tibble(x = 1, y = 2, z = 3, a = 4)
df <- group_by(df, z)
expect_named(mutate(df, y = NULL, y = 3, .keep = "all"), c("x", "y", "z", "a"))
expect_named(mutate(df, b = a, y = NULL, y = 3, .keep = "used"), c("y", "z", "a", "b"))
expect_named(mutate(df, b = a, y = NULL, y = 3, .keep = "unused"), c("x", "y", "z", "b"))
# It isn't treated as a "new" column
expect_named(mutate(df, y = NULL, y = 3, .keep = "all", .before = x), c("x", "y", "z", "a"))
})
test_that("setting a new column to `NULL` works with `.before` and `.after` (#6563)", {
df <- tibble(x = 1, y = 2, z = 3, a = 4)
expect_named(mutate(df, b = NULL, .before = 1), names(df))
expect_named(mutate(df, b = 1, b = NULL, .before = 1), names(df))
expect_named(mutate(df, b = NULL, b = 1, .before = 1), c("b", "x", "y", "z", "a"))
expect_named(mutate(df, b = NULL, c = 1, .after = 2), c("x", "y", "c", "z", "a"))
})
test_that(".keep= always retains grouping variables (#5582)", {
df <- tibble(x = 1, y = 2, z = 3) %>% group_by(z)
expect_equal(
df %>% mutate(a = x + 1, .keep = "none"),
tibble(z = 3, a = 2) %>% group_by(z)
)
expect_equal(
df %>% mutate(a = x + 1, .keep = "all"),
tibble(x = 1, y = 2, z = 3, a = 2) %>% group_by(z)
)
expect_equal(
df %>% mutate(a = x + 1, .keep = "used"),
tibble(x = 1, z = 3, a = 2) %>% group_by(z)
)
expect_equal(
df %>% mutate(a = x + 1, .keep = "unused"),
tibble(y = 2, z = 3, a = 2) %>% group_by(z)
)
})
test_that("mutate() preserves the call stack on error (#5308)", {
foobar <- function() stop("foo")
stack <- NULL
expect_error(
withCallingHandlers(
error = function(...) stack <<- sys.calls(),
mutate(mtcars, foobar())
)
)
expect_true(some(stack, is_call, "foobar"))
})
test_that("dplyr data mask can become obsolete", {
lazy <- function(x) {
list(enquo(x))
}
df <- tibble(
x = 1:2
)
res <- df %>%
rowwise() %>%
mutate(y = lazy(x), .keep = "unused")
expect_equal(names(res), c("x", "y"))
expect_error(eval_tidy(res$y[[1]]))
})
test_that("mutate() deals with 0 groups (#5534)", {
df <- data.frame(x = numeric()) %>%
group_by(x)
expect_equal(
mutate(df, y = x + 1),
data.frame(x = numeric(), y = numeric()) %>% group_by(x)
)
expect_snapshot({
mutate(df, y = max(x))
})
})
test_that("functions are not skipped in data pronoun (#5608)", {
f <- function(i) i + 1
df <- tibble(a = list(f), b = 1)
two <- df %>%
rowwise() %>%
mutate(res = .data$a(.data$b)) %>%
pull(res)
expect_equal(two, 2)
})
test_that("mutate() casts data frame results to common type (#5646)", {
df <- data.frame(x = 1:2, g = 1:2) %>% group_by(g)
res <- df %>%
mutate(if (g == 1) data.frame(y = 1) else data.frame(y = 1, z = 2))
expect_equal(res$z, c(NA, 2))
})
test_that("mutate() supports empty list columns in rowwise data frames (#5804", {
res <- tibble(a = list()) %>%
rowwise() %>%
mutate(n = lengths(a))
expect_equal(res$n, integer())
})
test_that("mutate() fails on named empty arguments (#5925)", {
expect_error(
mutate(tibble(), bogus = )
)
})
# Error messages ----------------------------------------------------------
test_that("mutate() give meaningful errors", {
expect_snapshot({
tbl <- tibble(x = 1:2, y = 1:2)
# setting column to NULL makes it unavailable
(expect_error(tbl %>% mutate(y = NULL, a = sum(y))))
(expect_error(tbl %>%
group_by(x) %>%
mutate(y = NULL, a = sum(y))
))
# incompatible column type
(expect_error(tibble(x = 1) %>% mutate(y = mean)))
# Unsupported type"
df <- tibble(g = c(1, 1, 2, 2, 2), x = 1:5)
(expect_error(df %>% mutate(out = env(a = 1))))
(expect_error(df %>%
group_by(g) %>%
mutate(out = env(a = 1))
))
(expect_error(df %>%
rowwise() %>%
mutate(out = rnorm)
))
# incompatible types across groups
(expect_error(
data.frame(x = rep(1:5, each = 3)) %>%
group_by(x) %>%
mutate(val = ifelse(x < 3, "foo", 2))
))
# mixed nulls
(expect_error(
tibble(a = 1:3, b=4:6) %>%
group_by(a) %>%
mutate(if(a==1) NULL else "foo")
))
(expect_error(
tibble(a = 1:3, b=4:6) %>%
group_by(a) %>%
mutate(if(a==2) NULL else "foo")
))
# incompatible size
(expect_error(
data.frame(x = c(2, 2, 3, 3)) %>% mutate(int = 1:5)
))
(expect_error(
data.frame(x = c(2, 2, 3, 3)) %>%
group_by(x) %>%
mutate(int = 1:5)
))
(expect_error(
data.frame(x = c(2, 3, 3)) %>%
group_by(x) %>%
mutate(int = 1:5)
))
(expect_error(
data.frame(x = c(2, 2, 3, 3)) %>%
rowwise() %>%
mutate(int = 1:5)
))
(expect_error(
tibble(y = list(1:3, "a")) %>%
rowwise() %>%
mutate(y2 = y)
))
(expect_error(
data.frame(x = 1:10) %>% mutate(y = 11:20, y = 1:2)
))
# .data pronoun
(expect_error(
tibble(a = 1) %>% mutate(c = .data$b)
))
(expect_error(
tibble(a = 1:3) %>%
group_by(a) %>%
mutate(c = .data$b)
))
# obsolete data mask
lazy <- function(x) list(enquo(x))
res <- tbl %>%
rowwise() %>%
mutate(z = lazy(x), .keep = "unused")
(expect_error(
eval_tidy(res$z[[1]])
))
# Error that contains {
(expect_error(
tibble() %>% mutate(stop("{"))
))
})
})
test_that("mutate() errors refer to expressions if not named", {
expect_snapshot({
(expect_error(mutate(mtcars, 1:3)))
(expect_error(mutate(group_by(mtcars, cyl), 1:3)))
})
})
test_that("`mutate()` doesn't allow data frames with missing or empty names (#6758)", {
df1 <- new_data_frame(set_names(list(1), ""))
df2 <- new_data_frame(set_names(list(1), NA_character_))
expect_snapshot(error = TRUE, {
mutate(df1)
})
expect_snapshot(error = TRUE, {
mutate(df2)
})
})
|
3748ae2114d9edbf7e57da46b5d9957f10db586b
|
3578d3e6b04ff37d299980d648a64dcf26e95e09
|
/R/hmm.R
|
aaf00468055e645b588df77340017f697b3459aa
|
[] |
no_license
|
eduardoscopel/util
|
ae6342d9c30a7d0cec55b160301ec9e7bb0db26d
|
f306a2dc4589522d0d5d5b130fdcf21c98cd1db9
|
refs/heads/master
| 2021-05-30T00:25:48.120182
| 2015-12-30T20:26:48
| 2015-12-30T20:26:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,896
|
r
|
hmm.R
|
## --- hmm.R --- ##
## Date: 10 March 2015
## Purpose: Finally implement a general HMM for myself in pure R, allowing time-varying transition and emission probabilities.
# Rescale a nonnegative-sum vector so its elements sum to `y`.
# Returns a zero vector of the same length when sum(x) is not positive.
rescale <- function(x, y = 1) {
  total <- sum(x)
  if (total > 0) {
    y * x / total
  } else {
    rep(0, length(x))
  }
}
## initialize an HMM object with known transition and emission probabilities
init.hmm <- function(x, tprob, emit, ...) {
## set length of observed sequence
if (!length(x))
stop("Observed sequence must have strictly positive length.")
else if (length(x) == 1)
nobs <- as.integer(ceiling(abs(x)))
else
nobs <- length(x)
## check validity of transition matrix
if (length(dim(tprob)) < 2 | !(is.matrix(tprob)|is.array(tprob)) | !is.numeric(tprob))
stop("Transition probabilities are likely misspecified; please supply a numeric matrix with >=2 dimensions.")
else if (length(dim(tprob)) < 3) {
## if transition probs are not time-varying, pretend like they are
.tprob <- array(dim = c(nobs, dim(tprob)))
for (i in 1:nobs)
.tprob[i,,] <- tprob
tprob <- .tprob
}
## normalize transition matrix to columns and rows sum to unity
for (i in 1:nobs) {
if (!identical(tprob[i,,], t(tprob[i,,])))
stop("Oops -- transition matrix should be symmetric.")
tprob[i,,] <- tprob[i,,]/rowSums(tprob[i,,])
if ( any(colSums(tprob[i,,]) != 1) | any(rowSums(tprob[i,,]) != 1) )
stop("Normalization of transition matrix failed.")
}
if (length(dim(emit)) < 2 | !is.matrix(emit) | !is.numeric(emit))
stop("Emission probabilities are likely misspecified; please supply a numeric matrix with >= 2 dimensions.")
else if (length(dim(emit)) < 3) {
## if transition probs are not time-varying, pretend like they are
.emit <- array(dim = c(nobs, dim(emit)))
for (i in 1:nobs)
.emit[i,,] <- emit
emit <- .emit
}
## normalize emission probabilities so that rows sum to unity
for (i in 1:nobs) {
emit[i,,] <- emit[i,,]/rowSums(emit[i,,])
if ( any(rowSums(tprob[i,,]) != 1) )
stop("Normalization of emission matrix failed.")
}
#print(any(is.na(tprob)))
#print(any(is.na(emit)))
nstates <- dim(emit)[2]
nsymb <- dim(emit)[3]
obs <- NULL
if (length(x) > 1)
obs <- x
if (length(x) > 1)
if (length(unique(x)) != nsymb)
warning(paste0("Number of observed symbols (",length(unique(x)),
") is different than corresponding dimension of emission matrix (",nsymb,"). ",
"This could be okay but you should check the input."))
hmm <- list(nstates = nstates, nsymb = nsymb, nobs = nobs,
tprob = tprob, emit = emit, obs = obs)
class(hmm) <- c("hmm", class(hmm))
return(hmm)
}
## get posterior decoding, given transition and emission probs in <hmm>
viterbi <- function(hmm, x = NULL, progress = TRUE, ...) {
if (!inherits(hmm, "hmm"))
warning("Are you sure this is an object of class 'hmm'? Proceeding with skepticism...")
## initalize observed state sequence
if (is.null(x))
x <- hmm$obs
else
if (length(x) != hmm$nobs)
stop("Observation sequence provided doesn't match the one with which the model was intialized.")
x <- as.numeric(factor(x, nmax = hmm$nsymb))
log1p <- function(x) log(1+x)
## from Christinanini & Hahn (2007) pp75-77
V <- matrix(0, nrow = hmm$nstates, ncol = hmm$nobs+1)
tprob <- hmm$tprob
emit <- hmm$emit
V[,1] <- log1p(1/nrow(V))
tb <- matrix(0, nrow = hmm$nstates, ncol = hmm$nobs)
if (progress)
pb <- txtProgressBar(min = 0, max = ncol(V)-1, style = 3)
for (i in 2:ncol(V)) {
for (k in 1:nrow(V)) {
mu <- V[ ,i-1 ] + log1p(tprob[ i-1,,k ])
#best <- which.max(mu)
#tb[ k,i-1 ] <- best
V[ k,i ] <- max(mu) + log1p(emit[ i-1,k,x[i-1] ])
last <- which.max(V[,i])
}
if (progress)
setTxtProgressBar(pb, i-1)
}
#print(tb)
#print(last)
#decoded <- rep(0, hmm$nobs)
#decoded[ hmm$nobs ] <- last
#for (i in ncol(V):2) {
# decoded[i] <- tb[ last, i-1 ]
# last <- tb[ last,i-1 ]
#}
decoded <- apply(V, 2, which.max)
hmm$decoded <- decoded[-1]
hmm$encoded <- x
return(hmm)
}
## Simulate a hidden-state sequence and matching observations from an HMM.
##
## Args:
##   hmm: an object created by init.hmm (class "hmm"); a warning (not an
##        error) is raised if the class attribute is missing.
##   n: length of the sequence to simulate; must satisfy 1 <= n <= hmm$nobs
##      because the (possibly time-varying) tprob/emit arrays only cover
##      nobs time steps.
##   init: optional initial-state distribution; defaults to uniform over
##         the hidden states.
##   ...: currently unused.
##
## Returns: list(hidden = <state indices>, obs = <symbol indices>),
##   each of length n.
simulate.hmm <- function(hmm, n, init = NULL, ...) {
  if (!inherits(hmm, "hmm"))
    warning("Are you sure this is an object of class 'hmm'? Proceeding with skepticism...")
  if (n < 1)
    stop("Can only simulate sequences of length >= 1.")
  if (n > hmm$nobs)
    stop("Can only simulate sequences of length <= nobs. Initialize a longer HMM if you need more.")
  tprob <- hmm$tprob
  emit <- hmm$emit
  ## generate hidden state sequence
  if (is.null(init))
    init <- rep(1/hmm$nstates, hmm$nstates)
  states <- rep(0, n)
  states[1] <- apply(rmultinom(1, 1, init), 2, which.max)
  ## bug fix: the original used `for (j in 2:n)`, which runs j = 2, 1 when
  ## n == 1 and then indexes tprob with row 0, crashing rmultinom;
  ## seq_len() makes the n == 1 case a no-op and is identical for n >= 2
  ## (same rmultinom call order, so seeded results are unchanged).
  for (j in seq_len(n - 1) + 1) {
    states[j] <- apply(rmultinom(1, 1, tprob[j-1, states[j-1], ]), 2, which.max)
  }
  ## simulate observed states conditional on hidden states
  obs <- rep(0, n)
  for (i in seq_len(n)) {
    obs[i] <- apply(rmultinom(1, 1, emit[i, states[i], ]), 2, which.max)
  }
  return(list(hidden = states, obs = obs))
}
|
d57634470ba90f7e59f23e570057d375a0d770d9
|
af77cc9ccadb9cf4d451831fdd07abe13503a879
|
/yelp/wekafiles/packages/RPlugin/mlr/mlr/R/control.seq.r
|
2b559497dd6bd92a936c8c9dc3f10adf99de3712
|
[] |
no_license
|
tummykung/yelp-dataset-challenge
|
7eed6a4d38b6c9c90011fd09317c5fa40f9bc75c
|
84f12682cba75fa4f10b5b3484ce9f6b6c8dad4a
|
refs/heads/master
| 2021-01-18T14:10:55.722349
| 2013-05-21T09:30:37
| 2013-05-21T09:30:37
| 9,527,545
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,437
|
r
|
control.seq.r
|
#' @include control.varsel.r
roxygen()
#' @exportClass sequential.control
#' @rdname sequential.control
# S4 class holding the settings for sequential variable selection.
# Inherits the shared variable-selection options from "varsel.control"
# (declared elsewhere in the package) and adds the three knobs specific
# to sequential search: the search method name and the alpha/beta
# minimal-improvement thresholds for forward/backward steps.
setClass(
"sequential.control",
contains = c("varsel.control"),
representation = representation(
method = "character",
alpha = "numeric",
beta = "numeric"
)
)
#' Constructor.
# initialize method: forwards the shared options to the parent
# ("varsel.control") initializer via callNextMethod() -- note that maxit
# is pinned to .Machine$integer.max for sequential search -- and then
# stores the sequential-specific slots on the object.
setMethod(
f = "initialize",
signature = signature("sequential.control"),
def = function(.Object, minimize, tune.threshold, thresholds, path, max.vars, method, alpha, beta) {
.Object = callNextMethod(.Object, minimize, tune.threshold=tune.threshold, thresholds, path=path,
maxit=.Machine$integer.max, max.vars=max.vars)
.Object@alpha = alpha
.Object@beta = beta
.Object@method = method
return(.Object)
}
)
#' Control structure for sequential variable selection.
#'
#' @param minimize [logical] \cr
#' Minimize performance measure? Default is TRUE.
#' @param tune.threshold [logical] \cr
#' Perform empirical thresholding? Default is FALSE. Only supported for binary classification and you have to set predict.type to "prob" for this in make.learner.
#' @param thresholds [numeric] \cr
#' Number of thresholds to try in tuning. Predicted probabilities are sorted and divided into groups of equal size. Default is 10.
#' @param path [boolean]\cr
#' Should optimization path be saved?
#' @param max.vars [integer] \cr
#' Maximal number of allowed variables in the final set. Default is max. integer.
#' @param method [\code{\link{character}}] \cr
#' Search method. Currently supported are sequential forward search "sfs", sequential backward search "sbs",
#' sequential floating forward search "sffs", sequential floating backward search "sfbs". Default is "sfs".
#' @param alpha [numeric] \cr
#' sfs, sffs: In a forward step, minimal improvement of performance measure. Can be negative.
#' @param beta [numeric] \cr
#' sbs, sfbs: In a backward step, minimal improvement of performance measure. Can be negative.
#'
#' @return Control structure.
#' @exportMethod sequential.control
#' @rdname sequential.control
#' @title Control structure for sequential variable selection.
setGeneric(
name = "sequential.control",
# Defaults are filled in here, before standardGeneric() dispatches: the
# method below is registered only for a fully-typed signature
# (logical/integer/character/numeric), so missing arguments are given
# defaults and numeric inputs are coerced to integer first to make
# dispatch succeed.
def = function(minimize, tune.threshold, thresholds, path, max.vars, method, alpha, beta) {
if (missing(minimize))
minimize=TRUE
if (missing(tune.threshold))
tune.threshold=FALSE
if (missing(thresholds))
thresholds=10
# coerce so the "integer"-typed method signature matches
if (is.numeric(thresholds))
thresholds = as.integer(thresholds)
if (missing(path))
path = FALSE
# max.vars defaults to "unbounded"
if (missing(max.vars))
max.vars = .Machine$integer.max
if (is.numeric(max.vars))
max.vars = as.integer(max.vars)
if (missing(method))
method="sfs"
if (missing(alpha))
alpha=0.01
if (missing(beta))
beta=0.01
standardGeneric("sequential.control")
}
)
#' @rdname sequential.control
# Concrete method: by the time we get here the generic has filled in every
# default, so this is a straight pass-through to the class constructor.
setMethod(
  f = "sequential.control",
  signature = signature(minimize="logical", tune.threshold="logical", thresholds="integer", path="logical",
    max.vars="integer", method="character", alpha="numeric", beta="numeric"),
  def = function(minimize, tune.threshold, thresholds, path, max.vars, method, alpha, beta) {
    ctrl <- new("sequential.control",
      minimize = minimize,
      tune.threshold = tune.threshold,
      thresholds = thresholds,
      path = path,
      max.vars = max.vars,
      method = method,
      alpha = alpha,
      beta = beta)
    ctrl
  }
)
|
1ef460b9755df2c694a48de170a263576b94265d
|
d60b8cbc13369977ad63ec7d4600a8caafbc5b57
|
/shiny/server copy.R
|
62b3ae781a03888b99da72724dc6b70d557855f7
|
[] |
no_license
|
aokay/projects
|
8aa7b4d7bac162b8b76d854dbcf50830633b4c73
|
46b67adab3d53d847c2448fd65ff962472052fcd
|
refs/heads/master
| 2021-01-22T07:27:42.453285
| 2015-03-02T11:45:03
| 2015-03-02T11:45:03
| 30,963,254
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,172
|
r
|
server copy.R
|
library(shiny)
# ggplot2 is used inside renderPlot() below but was never attached; load it
# explicitly so ggplot()/geom_tile() are found at render time.
library(ggplot2)
# Rely on the 'WorldPhones' dataset in the datasets
# package (which generally comes preloaded).
library(datasets)
# Server for the bike-share Shiny app: renders a weekday-by-hour heatmap of
# mean ride counts, restricted to the months matched by input$months.
# NOTE(review): `train_full` is assumed to be loaded elsewhere (e.g. global.R)
# and to have `datetime` and `count` columns — confirm against the app setup.
shinyServer(function(input, output) {
  # Fill in the spot we created for a plot
  output$bikesharePlot <- renderPlot({
    train_full$day <- weekdays(as.Date(train_full$datetime))
    # strptime returns POSIXlt, which is what the `$time$hour` extraction below relies on.
    train_full$time <- strptime(train_full$datetime, format="%Y-%m-%d %H:%M:%S")
    # BUG FIX: the original called format(time, '%b'), which formats the base R
    # *function* `time` rather than the parsed timestamps. Use the parsed column.
    train_full$month <- format(train_full$time, '%b')
    # Mean count per (weekday, hour) over the rows whose month matches input$months.
    day_hour_counts <- as.data.frame(aggregate(train_full[grep(input$months,train_full$month),"count"], list(train_full[grep(input$months,train_full$month),]$day, train_full[grep(input$months,train_full$month),]$time$hour), mean))
    # Order weekdays Monday..Sunday so the heatmap rows are chronological.
    day_hour_counts$Group.1 <- factor(day_hour_counts$Group.1, ordered=TRUE, levels=c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"))
    day_hour_counts$hour <- as.numeric(as.character(day_hour_counts$Group.2))
    ggplot(day_hour_counts, aes(x = hour, y = Group.1)) + geom_tile(aes(fill = x)) + scale_fill_gradient(name="Average Counts", low="white", high="purple") + theme(axis.title.y = element_blank())
  })
})
|
17f9ca53a0293ed707fe31220956f4a93147f624
|
7ca3dfeff09d362063e18a458bce38e1983247fe
|
/man/reproduce_emeans_effect_size_COMPLETENESS.Rd
|
fa0956d90498bccdb1d973353996e64694fb1377
|
[] |
no_license
|
karacitir/reproducerTaskGra
|
e390b5d56cae74f78c088cca17506a4a279c867a
|
9d81ebe1e312e4f74dbf615511ecc91bba276852
|
refs/heads/master
| 2020-04-12T05:13:44.721531
| 2019-01-30T17:04:03
| 2019-01-30T17:04:03
| 161,769,783
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 758
|
rd
|
reproduce_emeans_effect_size_COMPLETENESS.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lmm_funcions.R
\name{reproduce_emeans_effect_size_COMPLETENESS}
\alias{reproduce_emeans_effect_size_COMPLETENESS}
\title{Reproduce effect sizes based on estimated marginal means}
\usage{
reproduce_emeans_effect_size_COMPLETENESS()
}
\value{
a data frame containing estimated marginal means for Completeness computed for each task
and task description granularity, the difference in estimated marginal means between the
coarser-grained and finer-grained levels and the effect size computed as the percentage increase
in the estimated marginal means
}
\description{
Reproduce effect sizes based on estimated marginal means
}
\examples{
reproduce_emeans_effect_size_COMPLETENESS()
}
|
6ca241bc5a048b0cd3708c7102378f48173729d8
|
77c4f4dd27b8d7497e66a7a5a87ad7ea83f2c4be
|
/dev/tasks/conda-recipes/r-arrow/install.libs.R
|
005bbe16b9984b773ce4c72b03f9c31d53cfafa3
|
[
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"BSD-2-Clause",
"ZPL-2.1",
"BSL-1.0",
"LicenseRef-scancode-public-domain",
"NTP",
"OpenSSL",
"CC-BY-4.0",
"LLVM-exception",
"Python-2.0",
"CC0-1.0",
"LicenseRef-scancode-protobuf",
"JSON",
"Zlib",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
apache/arrow
|
0714bfbf6fd491e1f4ed4acf838845ce4b94ec3e
|
59954225d4615f9b3bd7a3c266fb68761794229a
|
refs/heads/main
| 2023-08-24T09:04:22.253199
| 2023-08-24T07:21:51
| 2023-08-24T07:21:51
| 51,905,353
| 12,955
| 3,585
|
Apache-2.0
| 2023-09-14T20:45:56
| 2016-02-17T08:00:23
|
C++
|
UTF-8
|
R
| false
| false
| 346
|
r
|
install.libs.R
|
# Install hook run by R CMD INSTALL: copy the built arrow.dll from the package
# sources into the installed package's libs/ directory, renamed lib_arrow.dll.
# R_PACKAGE_SOURCE, R_PACKAGE_DIR and R_ARCH are injected by the install machinery.
src_dir <- file.path(R_PACKAGE_SOURCE, "src", fsep = "/")
dest_dir <- file.path(R_PACKAGE_DIR, paste0("libs", R_ARCH), fsep = "/")
# Make sure the destination directory exists before copying.
dir.create(file.path(R_PACKAGE_DIR, paste0("libs", R_ARCH), fsep = "/"),
           recursive = TRUE, showWarnings = FALSE)
dll_source <- file.path(src_dir, "arrow.dll", fsep = "/")
dll_target <- file.path(dest_dir, "lib_arrow.dll", fsep = "/")
file.copy(dll_source, dll_target)
|
f664971a64b0a0dd5f42a01f6b56cd708df10edc
|
317ab2d664c90292aaa3c98d55e3970c5b9089e1
|
/FS12b_16S.R
|
d4679adaf777a459ca9396e3350200ae6331bd93
|
[] |
no_license
|
Jtrachsel/FS12
|
a35dd5d190d3ccd78c057e9028de2fd90022afde
|
2cbbad7db6583592928ab98391d7925aca92c1c5
|
refs/heads/master
| 2021-07-09T08:28:18.223628
| 2020-07-30T17:44:41
| 2020-07-30T17:44:41
| 173,172,977
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 91,209
|
r
|
FS12b_16S.R
|
library(phyloseq)
library(tidyverse)
library(funfuns)
# library(pairwiseAdonis)
library(DESeq2)
library(vegan)
library(funfuns)
# --- Load mothur outputs: sample metadata, shared (OTU count) table, taxonomy ---
meta <- read.csv('./data/FS12_final_meta.csv', header = TRUE, stringsAsFactors = FALSE)
shared <- read_delim('./data/FS12.shared', delim = '\t') %>% as.data.frame()
taxa <- extract_mothur_tax('./data/FS12.taxonomy')
# Use the mothur Group column as sample rownames, then drop mothur's
# bookkeeping columns (label, Group, numOtus).
rownames(shared) <- shared$Group
shared <- shared[,-c(1,2,3)]
hist(rowSums(shared), breaks = 50)
# length(colnames(shared)) # 10648 total OTUs detected (no removal mocks and NTCs and all kinds of garbage here)
# rownames(shared) %in% meta$sample_ID
# Keep only samples present in both the metadata and the OTU table
# (drops mocks/NTCs, which have no metadata rows).
meta <- meta[meta$sample_ID %in% rownames(shared),]
# mocks <- shared[(!rownames(shared) %in% meta$sample_ID),]
shared <- shared[rownames(shared) %in% meta$sample_ID,]
rownames(shared) == meta$sample_ID
rownames(meta) <- meta$sample_ID
# --- Depth and prevalence filters ---
shared <- shared[rowSums(shared) > 1250,] # remove samples with less than 1250 reads
shared <- shared[,colSums(shared > 0) > 3] # removes otus that are detected in fewer than 4 samples globally (including timecourse data)
shared <- shared[,colSums(shared) > 5] # at least 6 observations globally
length(colnames(shared))
hist(rowSums(shared), breaks = 50)
# Align metadata and OTU table row order so downstream phyloseq assembly is valid.
meta <- meta[order(meta$sample_ID),]
shared <- shared[order(rownames(shared)),]
rownames(shared) == meta$sample_ID
# "set" = experiment/day/tissue/treatment grouping label used throughout the script.
meta$set <- paste(meta$experiment, meta$day, meta$tissue, meta$treatment, sep = '_')
nnnn <- meta %>% group_by(set) %>% summarise(N=n())
rownames(meta) == rownames(shared)
meta <- meta[meta$sample_ID %in% rownames(shared),] -> # (review note removed)
WRITE_ME <- FS12@sam_data %>% as(Class = 'matrix') %>% as.data.frame()
WRITE_ME <- WRITE_ME %>%
filter(pignum != 101) %>%
dplyr::select(sample_ID, everything(), -ends_with('ate'), -weight, -pig_day_tissue, -set) %>%
arrange(experiment, tissue, day, treatment) %>%
mutate(env_medium=case_when(tissue == 'F' ~ 'feces',
tissue == 'C' ~ 'cecal contents',
tissue == 'X' ~ 'cecal mucosa',
tissue == 'I' ~ 'ileal mucosa',
tissue == 'Q' ~ 'fecal tetrathionate broth enrichment',
TRUE ~ 'this should never happen'),
collect_date=case_when(experiment == 'X12b' & day == 'D0' ~ '2017-10-01',
experiment == 'X12b' & day == 'D2' ~ '2017-10-03',
experiment == 'X12b' & day == 'D7' ~ '2017-10-08',
experiment == 'X12b' & day == 'D14' ~ '2017-10-15',
experiment == 'X12b' & day == 'D21' ~ '2017-10-22',
experiment == 'X12a' & day == 'D0' ~ '2017-09-01',
experiment == 'X12a' & day == 'D3' ~ '2017-09-04',
experiment == 'X12a' & day == 'D9' ~ '2017-09-10',
experiment == 'X12a' & day == 'D14' ~ '2017-09-15',
experiment == 'X12a' & day == 'D17' ~ '2017-09-18',
experiment == 'X12a' & day == 'D23' ~ '2017-09-24',
experiment == 'X12a' & day == 'D30' ~ '2017-10-01',
TRUE ~ 'SOMETHINGSWRONG'))
WRITE_ME %>% write_tsv('FS12_meta_for_ncbi.tsv')
SRA_DATA <- WRITE_ME %>%
dplyr::select(sample_ID, env_medium) %>%
mutate(filename_1 = paste0(sample_ID, '_R1.fq.gz'),
filename_2 = paste0(sample_ID, '_R2.fq.gz'),
title = paste('16S rRNA gene amplicon sequencing of Sus Scrofa gut derived metagenome:', env_medium)) %>%
write_tsv('SRA_metadata.tsv')
SRA_DATA %>% dplyr::pull(filename_1) %>% write_lines('R1_good.txt')
SRA_DATA %>% dplyr::pull(filename_2) %>% write_lines('R2_good.txt')
### mock stuff ###
#
# rownames(mocks)
# mocks[1:5,1:5]
# mocks <- mocks[rowSums(mocks) >1000,]
# rownames(mocks)
# hist(rowSums(mocks), breaks = 50)
#
# mocks <- mocks[grep('NTC', rownames(mocks)),]
#
# mocks <- mocks[,colSums(mocks)>2]
# colSums(mocks)
# rownames(mocks)
# mock_tax <- taxa[taxa$OTU %in% colnames(mocks),]
##### NMDS #####
# prob should write these out #
# --- Restrict to experiment X12b, drop pig 101, keep four treatment groups ---
FS12b <- subset_samples(FS12, experiment == 'X12b' & pignum != 101)
FS12b <- subset_samples(FS12b, treatment %in% c('Control', 'RPS', 'Acid', 'RCS'))
# FS12b <- subset_samples(FS12b, treatment %in% c('Control', 'Bglu'))
# making sure factors are set correctly
# FS12b@sam_data$treatment <- factor(FS12b@sam_data$treatment, levels = c('Control', 'RPS', 'Acid','ZnCu', 'RCS', 'Bglu'))
FS12b@sam_data$treatment <- factor(FS12b@sam_data$treatment, levels = c('Control', 'RPS', 'Acid','RCS'))
FS12b@sam_data$set
# Drop samples with NA treatment or tetrathionate-enrichment tissue ('Q').
FS12b <- prune_samples(!is.na(FS12b@sam_data$treatment) & FS12b@sam_data$tissue != 'Q', FS12b)
FS12b <- prune_taxa(taxa_sums(FS12b) > 10, FS12b) # removes sequences that occur less than 10 times globally
############ tax4fun?
# think i need species level classification for this #
# FS12b@sam_data
# form <- formula('~treatment*day')
# themetagenomics::prepare_data(otu_table = data.frame(FS12b@otu_table),
# metadata = data.frame(FS12b@sam_data),
# tax_table = data.frame(FS12b@tax_table),
# rows_are_taxa = FALSE, formula = formula)
# min(sample_sums(FS12b))
# taxa_sums(FS12b) > 2
# grep('Clostridium_sensu_stricto_1', FS12b@tax_table[,6])
### Something here changed.... my NMDS_ellipse function doesnt work this way anymore..
### apparently now i need to remove all phyloseqiness from the OTUtable... should probably look into this.
# --- NMDS ordinations (Bray-Curtis on rarefied counts) ---
# Feces-only ordination; rrarefy draws each sample down to the minimum depth.
FS12b_feces <- FS12b %>% prune_samples(samples = FS12b@sam_data$tissue =='F')
FS12b_feces_meta <- FS12b_feces@sam_data %>% data.frame()
FS12b_feces_OTU <- rrarefy(FS12b_feces@otu_table, min(rowSums(FS12b_feces@otu_table))) %>%
  data.frame()
FS12b_feces_nmds <- NMDS_ellipse(metadata = FS12b_feces_meta,
                                 OTU_table = FS12b_feces_OTU,
                                 grouping_set = 'set',distance_method = 'bray')
# All-tissue ordination on the same terms.
# NMDS_ellipse returns a list: [[1]] metadata+coords, [[2]] ellipse paths, [[3]] metaMDS object.
FS12b_metanmds <- NMDS_ellipse(metadata = FS12b@sam_data,
                               OTU_table = data.frame(rrarefy(FS12b@otu_table, min(rowSums(FS12b@otu_table)))),
                               grouping_set = 'set',distance_method = 'bray')
FS12b_metanmds
nums <- FS12b_metanmds[[1]] %>% group_by(set) %>% summarise(N=n())
FS12b_metanmds[[1]]
FS12b_metanmds[[3]]
# Fit the AULC variable (area under the log shedding curve, presumably) onto the
# fecal ordination — NOTE(review): confirm what AULC encodes in the metadata.
x <- envfit(ord=FS12b_feces_nmds[[3]], env=FS12b_feces_nmds[[1]]$AULC)
# envfit(ord=FS12b_metanmds[[3]], env=FS12b_metanmds[[1]]$log_sal)
plot(FS12b_feces_nmds[[3]])
plot(x)
FS12b_feces_nmds[[1]] %>%
  ggplot(aes(x=MDS1, y=MDS2, color=treatment))+
  geom_point() +
  geom_text(aes(label=pignum))
#### IS pig 181 swapped with pig 97? ####
#### Doesnt appear to be.... make some stacked bars...
# swapp <- FS12b %>% prune_samples(samples = FS12b@sam_data$pignum %in% c('97', '181') & FS12b@sam_data$tissue == 'F')
#
# swapp_meta <- swapp@sam_data %>% data.frame()
#
# swapp_OTU <- rrarefy(swapp@otu_table, min(rowSums(swapp@otu_table))) %>% data.frame()
#
#
# swapp_nmds <- NMDS_ellipse(metadata = swapp_meta,
# OTU_table = swapp_OTU,
# grouping_set = 'pignum',
# distance_method = 'bray')
#
#
#
#
# swapp_nmds[[1]] %>%
# ggplot(aes(x=MDS1, y=MDS2, color=pignum))+
# geom_point() +
# geom_text(aes(label=sample_ID))
#
# ##
#
#
#
# swapp_melt <- swapp %>% rarefy_even_depth() %>% psmelt()
#
#
# swapp_melt %>% group_by(Order) %>% summarise()
#
# swapp_melt %>% ggplot(aes(x=pignum, y=Abundance, fill=Order)) + geom_col() + facet_wrap(~day)
#
# #
#
# gooduns <- swapp_melt %>%
# group_by(Order) %>%
# summarise(tot_abund=sum(Abundance)) %>%
# arrange(desc(tot_abund)) %>%
# top_n(10) %>% select(Order) %>% unlist()
#
#
#
# swapp_melt <- swapp_melt %>% mutate(Order2=case_when(
# as.character(Order) %in% gooduns ~ as.character(Order),
# TRUE ~ 'other'))
#
# swapp_melt %>% ggplot(aes(x=pignum, y=Abundance, fill=Order2)) + geom_col() + facet_wrap(~day)
#
# #
# # stacked bars for control vs RPS #
#
# fec_stacks <- FS12b %>%
# prune_samples(samples = FS12b@sam_data$treatment %in% c('Control', 'RPS') & FS12b@sam_data$tissue == 'F')
#
# fec_stacks_melt <- fec_stacks %>% rarefy_even_depth() %>% psmelt()
#
#
# # fec_stacks_melt %>% group_by(Order) %>% summarise()
#
# # fec_stacks_melt %>% ggplot(aes(x=pignum, y=Abundance, fill=Order)) + geom_col() + facet_wrap(~day)
#
# #
#
# gooduns <- fec_stacks_melt %>%
# group_by(Order) %>%
# summarise(tot_abund=sum(Abundance)) %>%
# arrange(desc(tot_abund)) %>%
# top_n(10) %>% select(Order) %>% unlist()
#
#
#
# fec_stacks_melt <- fec_stacks_melt %>% mutate(Order2=case_when(
# as.character(Order) %in% gooduns ~ as.character(Order),
# TRUE ~ 'other'))
#
# fec_stacks_melt %>%
# ggplot(aes(x=treatment, y=Abundance, fill=Order2)) +
# geom_col() +
# facet_wrap(~day)
#
#
# unique(fec_stacks_melt$treatment)
#
# # # this is garbage here #
# # fec_stacks_melt %>%
# # group_by(treatment, day, Order2) %>%
# # summarise(percent_abund=Abundance/) %>%
# # ggplot(aes(x=treatment, y=percent_abund, fill=Order2)) +
# # geom_col() +
# # facet_wrap(~day)
#
# #
#
# ###
sample_sums(FS12b)
# transform_sample_counts()
# rarefy_even_depth(FS12b)@otu_table
# I DONT THNK THIS DIST IS FROM A RRAREFIED OTU TABLE #
# Fixed #
# Bray-Curtis distance on an evenly-rarefied OTU table (variable name says
# "jac" but the method is 'bray' — kept for continuity with downstream code).
FS12b_jac <- vegdist(rarefy_even_depth(FS12b)@otu_table, method = 'bray')
FS12b_jac
attr(FS12b_jac, which = 'Labels') == FS12b@sam_data$sample_ID
# Multivariate dispersion (distance to group centroid) per 'set', with a
# permutation test for pairwise differences.
dispers <- betadisper(FS12b_jac, group = FS12b@sam_data$set)
pdispers <- permutest(dispers, pairwise = TRUE)
# pdispers$pairwise$observed
dispersdf <- data.frame(dispers$distances)
dispersdf$group <- rownames(dispersdf)
FS12b@sam_data$sample_ID == dispersdf$group
meta$sample_ID %in% dispersdf$group
colnames(dispersdf)[2] <- 'sample_ID'
# Merge dispersion distances into the NMDS metadata frame.
FS12b_meta <- FS12b_metanmds[[1]]
FS12b_meta <- merge(FS12b_meta, dispersdf, by='sample_ID')
FS12b_meta$day <- as.numeric(gsub('D', '', FS12b_meta$day))
FS12b_meta$dayfact <- factor(FS12b_meta$day)
# FS12b_meta$treatment <- factor(FS12b_meta$treatment, levels = c('Control', 'RPS', 'Acid','ZnCu', 'RCS', 'Bglu'))
FS12b_meta$treatment <- factor(FS12b_meta$treatment, levels = c('Control', 'RPS', 'Acid', 'RCS'))
# Alpha diversity from a fresh rarefaction: Shannon, richness, Pielou evenness.
FS12b_meta$shan <- diversity(rrarefy(FS12b@otu_table, min(rowSums(FS12b@otu_table))))
FS12b_meta$rich <- specnumber(rrarefy(FS12b@otu_table, min(rowSums(FS12b@otu_table))))
FS12b_meta$even <- FS12b_meta$shan/log(FS12b_meta$rich)
#fecal shannon
FS12b_meta %>%
  filter(tissue == 'F') %>%
  ggplot(aes(x=treatment, y=shan, group=set, fill = treatment)) +
  geom_boxplot() +
  facet_wrap(~day, nrow = 1)+
  scale_fill_manual(values=c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) +
  ggtitle('Fecal Shannon Diversity (alpha) over time') +
  geom_jitter(shape = 21, stroke=1.2, size=2, width = .2)
#fecal even
FS12b_meta %>%
  filter(tissue == 'F') %>%
  ggplot(aes(x=treatment, y=even, group=set, fill = treatment)) +
  geom_boxplot() +
  facet_wrap(~day, nrow = 1)+
  scale_fill_manual(values=c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) +
  ggtitle('Fecal evenness over time')+
  geom_jitter(shape = 21, stroke=1.2, size=2, width = .2)
#fecal rich
FS12b_meta %>%
  filter(tissue == 'F') %>%
  ggplot(aes(x=treatment, y=rich, group=set, fill = treatment)) +
  geom_boxplot() +
  facet_wrap(~day, nrow = 1)+
  scale_fill_manual(values=c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) +
  ggtitle('Fecal richness (num OTUs) over time')+
  geom_jitter(shape = 21, stroke=1.2, size=2, width = .2)
#fecal dispersion
FS12b_meta %>%
  filter(tissue == 'F') %>%
  ggplot(aes(x=treatment, y=dispers.distances, group=set, fill = treatment)) +
  geom_boxplot() +
  facet_wrap(~day, nrow = 1)+
  scale_fill_manual(values=c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) +
  ggtitle('Fecal community dispersion over time')+
  geom_jitter(shape = 21, stroke=1.2, size=2, width = .2)
# FS12b_meta %>%
# filter(tissue == 'F') %>%
# ggplot(aes(x=day, y=dispers.distances, group=pignum, color=treatment)) +
# geom_line()
# FS12b_meta %>% filter(tissue == 'F') %>%
# ggplot(aes(x=day, y=shan, group=pignum, color=treatment)) + geom_line()
# FS12b_meta %>% filter(tissue == 'F') %>%
# ggplot(aes(x=day, y=dispers.distances, group=pignum, color=treatment)) +
# geom_line() + geom_point()
# FS12b_meta %>% filter(tissue == 'F') %>%
# ggplot(aes(x=day, y=rich, group=pignum, color=treatment)) +
# geom_line() + geom_point()
#
#### CHANGE TO ANOVA TESTS FOR ALPHA AND DISPERSION ####
#
# get_pairs <- function(df){
# pp <- pairwise.wilcox.test(df$dispers.distances, df$treatment, p.adjust.method = 'none')
# ppdf <- as.data.frame(pp$p.value)
# ps <- data.frame(matrix(c(pp$p.value), nrow = 1))
# names(ps) <- paste(c(rep(names(ppdf), each = nrow(ppdf))), "_vs_", rep(rownames(ppdf), ncol(ppdf)), sep = "")
# ps
#
# }
library(broom)
# Per-day one-way ANOVA of dispersion distance ~ treatment, followed by
# Tukey HSD; results tidied into one long frame.
disper_fecal_tests <- FS12b_meta %>%
  filter(tissue =='F') %>%
  group_by(day) %>%
  nest() %>%
  mutate(ANOVA = map(data, ~ aov(data=., formula = dispers.distances ~ treatment)),
         TUK = map(ANOVA, TukeyHSD),
         tid_tuk=map(TUK, tidy)) %>%
  select(day, tid_tuk) %>% unnest(cols = c(tid_tuk))# %>% select(day, starts_with('control'))
# Keep only contrasts involving Control that reach significance.
disper_fecal_tests %>% filter(grepl('Control', comparison)) %>%
  filter(adj.p.value < 0.05)
#
# get_pairs <- function(df){
#   pp <- pairwise.wilcox.test(df$shan, df$treatment, p.adjust.method = 'none')
#   ppdf <- as.data.frame(pp$p.value)
#   ps <- data.frame(matrix(c(pp$p.value), nrow = 1))
#   names(ps) <- paste(c(rep(names(ppdf), each = nrow(ppdf))), "_vs_", rep(rownames(ppdf), ncol(ppdf)), sep = "")
#   ps
#
# }
#
# shan_fecal_tests <- FS12b_meta %>% filter(tissue =='F') %>% group_by(day) %>%
#   nest() %>% mutate(pps = map(data, get_pairs)) %>%
#   select(day, pps) %>% unnest() %>% select(day, starts_with('control'))
# Same ANOVA/Tukey structure for Shannon diversity.
shan_fecal_tests <- FS12b_meta %>%
  filter(tissue =='F') %>%
  group_by(day) %>%
  nest() %>%
  mutate(ANOVA = map(data, ~ aov(data=., formula = shan ~ treatment)),
         TUK = map(ANOVA, TukeyHSD),
         tid_tuk=map(TUK, tidy)) %>%
  select(day, tid_tuk) %>% unnest(cols = c(tid_tuk))
shan_fecal_tests %>% filter(grepl('Control', comparison)) %>%
  filter(adj.p.value < 0.05)
#
# # dispersion tissues
# FS12b_meta %>% filter(tissue == 'X') %>% ggplot(aes(x=treatment, y=dispers.distances, group=set, fill = treatment)) + geom_boxplot() +
# scale_fill_manual(values=c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) #+ geom_text(aes(label=pignum))
#
# FS12b_meta %>% filter(tissue == 'C') %>% ggplot(aes(x=treatment, y=dispers.distances, group=set, fill = treatment)) + geom_boxplot() +
# scale_fill_manual(values=c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) #+ geom_text(aes(label=pignum))
#
# FS12b_meta %>% filter(tissue == 'I') %>% ggplot(aes(x=treatment, y=dispers.distances, group=set, fill = treatment)) + geom_boxplot() +
# scale_fill_manual(values=c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) #+ geom_text(aes(label=pignum))
#
# # shannon tissues
# FS12b_meta %>% filter(tissue == 'X') %>% ggplot(aes(x=treatment, y=shan, group=set, fill = treatment)) + geom_boxplot() +
# scale_fill_manual(values=c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) #+ geom_text(aes(label=pignum))
#
# FS12b_meta %>% filter(tissue == 'C') %>% ggplot(aes(x=treatment, y=shan, group=set, fill = treatment)) + geom_boxplot() +
# scale_fill_manual(values=c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) #+ geom_text(aes(label=pignum))
#
# FS12b_meta %>% filter(tissue == 'I') %>% ggplot(aes(x=treatment, y=shan, group=set, fill = treatment)) + geom_boxplot() +
# scale_fill_manual(values=c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) #+ geom_text(aes(label=pignum))
#
#
# --- Parse the 'set' label (experiment_day_tissue_treatment) back into columns
# on the ellipse-path frame so it can be faceted/filtered like the metadata ---
df_ell <- FS12b_metanmds[[2]]
df_ell$experiment <- gsub('(.*)_(.*)_(.*)_(.*)','\\1', df_ell$group)
df_ell$day <- gsub('(.*)_(.*)_(.*)_(.*)','\\2', df_ell$group)
df_ell$day <- gsub('D', '', df_ell$day)
df_ell$day <- factor(df_ell$day, levels = c(0, 2, 7, 14, 21))
df_ell$tissue <- gsub('(.*)_(.*)_(.*)_(.*)','\\3', df_ell$group)
df_ell$treatment <- gsub('(.*)_(.*)_(.*)_(.*)','\\4', df_ell$group)
FS12b_meta$day <- factor(FS12b_meta$day, levels = c(0, 2, 7, 14, 21))
# NOTE(review): releveling to six treatments here although only four remain
# after the earlier subset — the extra levels will simply be empty.
FS12b_meta$treatment <- factor(FS12b_meta$treatment, levels = c('Control', 'RPS', 'Acid','ZnCu', 'RCS', 'Bglu'))
df_ell$treatment <- factor(df_ell$treatment, levels = c('Control', 'RPS', 'Acid','ZnCu', 'RCS', 'Bglu'))
# 'greys' keeps a full copy of all points for the grey background layer in plots.
greys <- FS12b_meta
### adding feces only coordinates ###
# Join feces-only NMDS coordinates (fMDS1/fMDS2) onto the main metadata frame.
feces_nmds <- FS12b_feces_nmds[[1]]
FS12b_meta <- feces_nmds %>% select(sample_ID, MDS1, MDS2) %>%
  mutate(fMDS1=MDS1, fMDS2=MDS2) %>%
  select(sample_ID, fMDS1, fMDS2) %>%
  right_join(FS12b_meta)
###
# --- Per-day NMDS panels: colored points per treatment, segments to group
# centroids, ordination ellipses; grey layer shows all samples for context ---
FS12b_meta %>% filter(tissue == 'F' & day == 0) %>%
  ggplot(aes(x=MDS1, y=MDS2, fill=treatment, color=treatment)) +
  geom_point(data=greys, inherit.aes = FALSE, color='grey', aes(x=MDS1, y=MDS2), size=2) + geom_point(alpha=0.5, size=2) +
  geom_segment(aes(xend =centroidX , yend = centroidY), alpha=0.5) +
  geom_path(data = filter(df_ell, tissue == 'F' & day == 0), aes(x=NMDS1,y=NMDS2), size=1.05)+
  scale_color_manual(values=c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) + theme(panel.background = element_blank()) +
  annotate(geom='text', label='Day 0', x=-1.25, y=.6, size = 7)
FS12b_meta %>% filter(tissue == 'F' & day == 2) %>%
  ggplot(aes(x=MDS1, y=MDS2, fill=treatment, color=treatment)) +
  geom_point(data=greys, inherit.aes = FALSE, color='grey', aes(x=MDS1, y=MDS2), size=2) + geom_point(alpha=0.5, size=2) +
  geom_segment(aes(xend =centroidX , yend = centroidY), alpha=0.5) +
  geom_path(data = filter(df_ell, tissue == 'F' & day == 2), aes(x=NMDS1,y=NMDS2), size=1.05)+
  scale_color_manual(values=c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) + theme(panel.background = element_blank()) +
  annotate(geom='text', label='Day 2', x=-1.25, y=.6, size = 7)
FS12b_meta %>% filter(tissue == 'F' & day == 7) %>%
  ggplot(aes(x=MDS1, y=MDS2, fill=treatment, color=treatment)) +
  geom_point(data=greys, inherit.aes = FALSE, color='grey', aes(x=MDS1, y=MDS2), size=2) + geom_point(alpha=0.5, size=2) +
  geom_segment(aes(xend =centroidX , yend = centroidY), alpha=0.5) +
  geom_path(data = filter(df_ell, tissue == 'F' & day == 7), aes(x=NMDS1,y=NMDS2), size=1.05)+
  scale_color_manual(values=c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) + theme(panel.background = element_blank()) +
  annotate(geom='text', label='Day 7', x=-1.25, y=.6, size = 7)
FS12b_meta %>% filter(tissue == 'F' & day == 14) %>%
  ggplot(aes(x=MDS1, y=MDS2, fill=treatment, color=treatment)) +
  geom_point(data=greys, inherit.aes = FALSE, color='grey', aes(x=MDS1, y=MDS2), size=2) + geom_point(alpha=0.5, size=2) +
  geom_segment(aes(xend =centroidX , yend = centroidY), alpha=0.5) +
  geom_path(data = filter(df_ell, tissue == 'F' & day == 14), aes(x=NMDS1,y=NMDS2), size=1.05)+
  scale_color_manual(values=c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) + theme(panel.background = element_blank()) +
  annotate(geom='text', label='Day 14', x=-1.25, y=.6, size = 7)
FS12b_meta %>% filter(tissue == 'F' & day == 21) %>%
  ggplot(aes(x=MDS1, y=MDS2, fill=treatment, color=treatment)) +
  geom_point(data=greys, inherit.aes = FALSE, color='grey', aes(x=MDS1, y=MDS2), size=2) + geom_point(alpha=0.5, size=2) +
  geom_segment(aes(xend =centroidX , yend = centroidY), alpha=0.5) +
  geom_path(data = filter(df_ell, tissue == 'F' & day == 21), aes(x=NMDS1,y=NMDS2), size=1.05)+
  scale_color_manual(values=c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) + theme(panel.background = element_blank()) +
  annotate(geom='text', label='Day 21 feces', x=-.5, y=.6, size = 7)
# Day-21 necropsy tissues (no grey background layer for these panels).
FS12b_meta %>% filter(tissue == 'C' & day == 21) %>%
  ggplot(aes(x=MDS1, y=MDS2, fill=treatment, color=treatment)) +
  # geom_point(data=greys, inherit.aes = FALSE, color='grey', aes(x=MDS1, y=MDS2), size=2) + geom_point(alpha=0.5, size=2) +
  geom_segment(aes(xend =centroidX , yend = centroidY), alpha=0.5) +
  geom_path(data = filter(df_ell, tissue == 'C' & day == 21), aes(x=NMDS1,y=NMDS2), size=1.05)+
  scale_color_manual(values=c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) + theme(panel.background = element_blank()) +
  annotate(geom='text', label='Day 21\nCecal Contents', x=-0, y=.0, size = 7)
FS12b_meta %>% filter(tissue == 'X' & day == 21) %>%
  ggplot(aes(x=MDS1, y=MDS2, fill=treatment, color=treatment)) +
  # geom_point(data=greys, inherit.aes = FALSE, color='grey', aes(x=MDS1, y=MDS2), size=2) + geom_point(alpha=0.5, size=2) +
  geom_segment(aes(xend =centroidX , yend = centroidY), alpha=0.5) +
  geom_path(data = filter(df_ell, tissue == 'X' & day == 21), aes(x=NMDS1,y=NMDS2), size=1.05)+
  scale_color_manual(values=c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) + theme(panel.background = element_blank()) +
  annotate(geom='text', label='Day 21\nCecal Mucosa', x=-0, y=.6, size = 7)
FS12b_meta %>% filter(tissue == 'I' & day == 21) %>%
  ggplot(aes(x=MDS1, y=MDS2, fill=treatment, color=treatment)) +
  # geom_point(data=greys, inherit.aes = FALSE, color='grey', aes(x=MDS1, y=MDS2), size=2) + geom_point(alpha=0.5, size=2) +
  geom_segment(aes(xend =centroidX , yend = centroidY), alpha=0.5) +
  geom_path(data = filter(df_ell, tissue == 'I' & day == 21), aes(x=NMDS1,y=NMDS2), size=1.05)+
  scale_color_manual(values=c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) + theme(panel.background = element_blank()) +
  annotate(geom='text', label='Day 21\nIleal Mucosa', x=-0, y=.6, size = 5)
####
# INSERTED #
### This is interesting... What does this mean?? ####
# need to move this whole section down below merge #
# FS12b_meta %>% filter(tissue == 'F') %>%
# group_by(day, treatment) %>%
# summarise(fMDS1=mean(fMDS1),
# fMDS2=mean(fMDS2)) %>%
# ggplot(aes(x=day, y=fMDS1, group=treatment, color=treatment)) +
# geom_line() + geom_point()
##### #####
# FS12b_meta %>% ggplot(aes(pen, fill=treatment))+geom_histogram(binwidth = 1)
# FS12b_meta %>% filter(tissue == 'F') %>%
# group_by(day, treatment) %>%
# summarise(fMDS1=mean(fMDS1),
# fMDS2=mean(fMDS2)) %>%
# ggplot(aes(x=day, y=fMDS2, group=treatment, color=treatment)) +
# geom_line() + geom_point()
# FS12b_meta %>% filter(tissue == 'F') %>%
# ggplot(aes(x=day, y=fMDS1, group=pignum)) +
# geom_line() + geom_point(aes(color=pen))
# FS12b_meta %>% filter(tissue == 'F') %>%
# ggplot(aes(x=day, y=fMDS2, group=pignum, color=treatment)) +
# geom_line() + geom_point()
# FS12b_meta %>% filter(tissue == 'F') %>%
# ggplot(aes(x=day, y=even, group=pignum, color=treatment)) +
# geom_line() + geom_point()
######### PW ADON HERE ########
#should split fecal and tissue 1st to reduce # of permutations...
####### TEMP SO I CAN PLAY WITH DIST MEASURES #####
set.seed(71)  # fix RNG so the rarefaction/permutation results below are reproducible
#
# all_pwad <- pairwise.adonis(data.frame(FS12a_rare@otu_table), FS12a_rare@sam_data$set, perm = 999, sim.method = 'bray', binary = FALSE)
#
# pwad_to_cont <- all_pwad[grep('Control', all_pwad$pairs),]
# # same_day <- pwad_to_cont[grep('.*_(.*)_.*_.* vs .*_\\1_.*_.*', pwad_to_cont$pairs),]
# same_day_tissue <- pwad_to_cont[grep('(.*)_(.*)_.* vs \\1_\\2_.*', pwad_to_cont$pairs),]
# same_day_tissue$treatment <- sub('D[0-9]+_[FX]_([A-Za-z_]+) vs .*_.*_.*', '\\1',same_day_tissue$pairs)
#
# same_day_tissue[same_day_tissue$treatment == 'Control',]$treatment <- sub('.*_.*_.* vs .*_.*_(.*)','\\1', same_day_tissue[same_day_tissue$treatment == 'Control',]$pairs)
# # sub('(D[0-9]+)_([FX])_([A-Za-z_]+) vs .*_.*_.*', '\\2',same_day_tissue$pairs)
# same_day_tissue$tissue <- sub('(D[0-9]+)_([FX])_([A-Za-z_]+) vs .*_.*_.*', '\\2',same_day_tissue$pairs)
# same_day_tissue$day <- sub('(D[0-9]+)_([FX])_([A-Za-z_]+) vs .*_.*_.*', '\\1',same_day_tissue$pairs)
#
# same_day_tissue$tissue <- ifelse(same_day_tissue$tissue == 'F', 'feces', 'cecal_mucosa')
#
# # same_day_tissue$p.adjusted <- p.adjust(same_day_tissue$p.value, method = 'fdr')
#
# same_day_tissue <- same_day_tissue %>% mutate(set=paste(tissue, day, sep = '_'))
# same_day_tissue <- same_day_tissue %>% group_by(set) %>% mutate(p.adjusted = p.adjust(p.value, method = 'fdr'))
# same_day_tissue$p.plot <- ifelse(same_day_tissue$p.adjusted <= 0.05, paste('p=', round(same_day_tissue$p.adjusted, 3), sep = ''), NA)
#
# same_day_tissue$set <- factor(same_day_tissue$set, levels = c('feces_D0', 'feces_D23', 'feces_D30', 'cecal_mucosa_D30'))
#
# #### NEED TO COME UP WITH COLOR TO USE
# #
#
#
#
#
# same_day_tissue %>%
# ggplot(aes(x=treatment, y=F.Model, fill=treatment)) +
# geom_col(color='black') + facet_wrap(~set) + geom_text(aes(label=p.plot), nudge_y = .2) +
# ggtitle('Community differences compared to controls', subtitle = 'larger F = greater difference. pvalues shown when <0.05') +
# scale_fill_brewer(palette = 'Set1')
# FS12b <- FS12b %>% prune_samples(samples = sample_data(FS12b)$tissue != 'Q')
# FS12b_rare <- FS12b %>% prune_samples(samples = sample_data(FS12b)$tissue != 'I')
# changed this to only include feces
# --- Pairwise PERMANOVA (Bray-Curtis) among 'set' groups, feces only ---
FS12b_rare <- FS12b %>% prune_samples(samples = sample_data(FS12b)$tissue == 'F')
FS12b_rare <- rarefy_even_depth(FS12b_rare)
min(sample_sums(FS12b_rare))
PW.ad <- pairwise.adonis(x=data.frame(FS12b_rare@otu_table), factors = FS12b_rare@sam_data$set, sim.method = 'bray', p.adjust.m = 'none', perm = 999)
# PW.ad <- pairwise.adonis(x=rrarefy(FS12b@otu_table, min(rowSums(FS12b@otu_table))), factors = FS12b@sam_data$set, sim.method = 'jaccard', p.adjust.m = 'none', permutations = 9999)
###### prob doesnt matter... #####
# report this with beginning diffs in beta div
# adonis(data.frame(FS12b_rare@otu_table) ~ tissue + day + treatment, data = data.frame(FS12b_rare@sam_data))
adonis(data.frame(FS12b_rare@otu_table) ~ day + treatment, data = data.frame(FS12b_rare@sam_data))
#######
PW.ad$pairs
# Keep only comparisons where day and tissue match on both sides (set label is
# experiment_day_tissue_treatment; backreferences pick same-day/same-tissue pairs).
goods <- PW.ad[grep('(.*)_(.*)_(.*)_(.*) vs (.*)_\\2_\\3_(.*)', PW.ad$pairs),]
times <- PW.ad[grep('(.*)_(.*)_(.*)_(.*) vs (.*)_(.*)_\\3_\\4', PW.ad$pairs),]
# length(goods[,1])
# goods$p.adjusted <- p.adjust(p=goods$p.value,method = 'holm')
# Tag each same-day comparison with a numeric day ('D2_' avoids matching D21).
D0 <- goods[grep('D0', goods$pairs),]
D0$day <- 0
D2 <- goods[grep('D2_', goods$pairs),]
D2$day <- 2
D7 <- goods[grep('D7', goods$pairs),]
D7$day <- 7
D14 <- goods[grep('D14', goods$pairs),]
D14$day <- 14
D21 <- goods[grep('D21', goods$pairs),]
D21$day <- 21
fin <- rbind(D0, D2, D7, D14, D21)
# Make the pair labels human readable.
fin$pairs <- gsub('X12b_', '', fin$pairs)
fin$pairs <- gsub('_F_', ' feces ', fin$pairs)
fin$pairs <- gsub('_C_', ' cec_cont ', fin$pairs)
fin$pairs <- gsub('_X_', ' cec_muc ', fin$pairs)
fin$pairs <- gsub('_I_', ' il_muc ', fin$pairs)
fin$pairs <- gsub('_Q_', ' tet ', fin$pairs)
# write.csv(fin, 'mothur_PERMANOVA_results.csv')
# Keep comparisons against Control; recover tissue and the non-Control treatment
# from the label (second branch handles labels where Control comes first).
to_conts <- fin[grep('Control', fin$pairs),]
to_conts$tissue <- gsub('D[0-9]+ ([A-Za-z_]+) [A-Za-z]+ vs D[0-9]+ [A-Za-z_]+ ([A-Za-z]+)', '\\1', to_conts$pairs)
to_conts$treatment <- gsub('D[0-9]+ ([A-Za-z_]+) ([A-Za-z]+) vs D[0-9]+ [A-Za-z_]+ ([A-Za-z]+)', '\\2', to_conts$pairs)
to_conts$treatment[to_conts$treatment == 'Control'] <- gsub('D[0-9]+ ([A-Za-z_]+) ([A-Za-z]+) vs D[0-9]+ [A-Za-z_]+ ([A-Za-z]+)', '\\3', to_conts[to_conts$treatment == 'Control',]$pairs)
# to_conts$p.hoch <- p.adjust(to_conts$p.value, method = 'hoch')
# to_conts$p.holm <- p.adjust(to_conts$p.value, method = 'holm')
to_conts$p.fdr <- p.adjust(to_conts$p.value, method = 'fdr')
to_conts$p.fdr <- round(to_conts$p.fdr, digits = 3)
to_conts$p.fdr.lab <- ifelse(to_conts$p.fdr < 0.05, to_conts$p.fdr, NA)
to_conts$treatment <- factor(to_conts$treatment, levels=c('RPS', 'Acid', 'ZnCu','RCS', 'Bglu'))
######## FIGURE 3 ##########
# ADD ALPHA DIV and DISPERSION
# PERMANOVA F vs day, one line per treatment; significant FDR p-values labeled.
to_conts %>% filter(tissue == 'feces') %>% ggplot(aes(x=day, y=F.Model, group=treatment, fill=treatment, color=treatment, label=p.fdr.lab)) +
  geom_line(size=1.52) + geom_point(shape=21) + scale_color_manual(values=c('#3399FF', 'orange', 'red', 'grey', 'purple')) +
  geom_label(color='black') +
  scale_fill_manual(values=c('#3399FF', 'orange', 'red', 'grey', 'purple')) +
  ggtitle('Community differences compared to control group over time', subtitle = )
to_conts
# tmp_adon <- same_day_tissue %>% filter(day %in% c('D0', 'D23'))
#
# tmp_adon <- tmp_adon[,colnames(tmp_adon) %in% colnames(to_conts)]
#
# tmp_adon2 <- to_conts[,colnames(to_conts) %in% colnames(tmp_adon)]
#
# long_adon <- rbind(tmp_adon, tmp_adon2)
#
# long_adon$day <- sub('D0',-30,long_adon$day)
# long_adon$day <- sub('D23',-7,long_adon$day)
# long_adon$day <- as.numeric(long_adon$day)
#
# long_adon$p.fdr <- p.adjust(long_adon$p.value, method = 'fdr')
# long_adon$p.fdr <- round(long_adon$p.fdr, digits = 3)
# long_adon$p.fdr.lab <- ifelse(long_adon$p.fdr < 0.05, long_adon$p.fdr, NA)
#
# long_adon$treatment <- factor(long_adon$treatment, levels=c('RPS', 'Acid', 'ZnCu','RCS', 'Bglu'))
#
#
# long_adon %>% filter(tissue == 'feces' & treatment %in% c('RPS', 'Acid', 'ZnCu', 'RCS', 'Bglu')) %>% ggplot(aes(x=day, y=F.Model, group=treatment, fill=treatment, color=treatment, label=p.fdr.lab)) +
# geom_line(size=1.52) + geom_point(shape=21) + scale_color_manual(values=c('#3399FF', 'orange', 'red', 'grey', 'purple')) +
# geom_label(color='black') +
# scale_fill_manual(values=c('#3399FF', 'orange', 'red', 'grey', 'purple')) +
# ggtitle('Community differences compared to control group over time', subtitle = )
#
#
#SHOULD PLOT DIVERSITY AND RICHNESS AT SAME TIMEPOINTS HERE
########### HIGH LOW TO CONT ##########
##SHOULD ORDINATE THIS TOO##
# FIX: removed a dead feces-only subset_samples() assignment that was
# immediately overwritten by the all-tissue subset on the next line.
FS12b_HL <- FS12b %>% subset_samples(treatment %in% c('Control', 'RPS'))
# Peek at the RPS pig IDs so the high/low shedder assignment below can be verified.
FS12b_HL %>% subset_samples(treatment == 'RPS') %>% sample_data() %>% select(pignum)
# Classify RPS pigs as low or high Salmonella shedders by pig number;
# everything else (the Control pigs) keeps the 'Control' label.
FS12b_HL@sam_data$shed <- ifelse(FS12b_HL@sam_data$pignum %in% c(373,321,181,392,97), 'low',
                                 ifelse(FS12b_HL@sam_data$pignum %in% c(50, 93,355, 244), 'high', 'Control'))
# Grouping label used by pairwise.adonis: day_tissue_shed.
FS12b_HL@sam_data$set <- paste(FS12b_HL@sam_data$day, FS12b_HL@sam_data$tissue, FS12b_HL@sam_data$shed, sep = '_')
FS12b_HL@sam_data
# Pairwise PERMANOVA (Bray-Curtis on counts rarefied to the smallest library)
# across all day_tissue_shed groups; p-values are FDR-corrected downstream,
# hence p.adjust.m = 'none' here.
PW.ad <- pairwise.adonis(x=rrarefy(FS12b_HL@otu_table, min(rowSums(FS12b_HL@otu_table))), factors = FS12b_HL@sam_data$set, sim.method = 'bray', p.adjust.m = 'none', perm = 9999)
# PW.ad <- pairwise.adonis(x=rrarefy(FS12b_HL@otu_table, min(rowSums(FS12b_HL@otu_table))), factors = FS12b_HL@sam_data$set, sim.method = 'jaccard', p.adjust.m = 'none', permutations = 9999)
PW.ad$pairs
# Keep only comparisons whose two group labels share the same leading
# day_tissue prefix (i.e. within-day, within-tissue contrasts).
goods <- PW.ad[grep('(.*)_(.*) vs \\1_.*', PW.ad$pairs),]
# times <- PW.ad[grep('(.*)_(.*)_(.*)_(.*) vs (.*)_(.*)_\\3_\\4', PW.ad$pairs),]
length(goods[,1])
# goods$p.adjusted <- p.adjust(p=goods$p.value,method = 'holm')
goods$pairs
# Pull the rows for one sampling day out of `goods` and attach a numeric day.
extract_day <- function(pattern, day_value) {
  day_chunk <- goods[grep(pattern, goods$pairs), ]
  day_chunk$day <- day_value
  day_chunk
}
D0 <- extract_day('D0', 0)
D2 <- extract_day('D2_', 2)   # trailing underscore keeps 'D2' from matching 'D21'
D7 <- extract_day('D7', 7)
D14 <- extract_day('D14', 14)
D21 <- extract_day('D21', 21)
# I think fin and goods are the same thing right now.... why did I do this again?
fin <- rbind(D0, D2, D7, D14, D21)
# Translate the coded pair labels into readable 'day tissue treatment' strings.
# Replacement order matches the original gsub chain.
pair_relabels <- c('X12b_' = '',
                   '_F_'   = ' feces ',
                   '_C_'   = ' cec_cont ',
                   '_X_'   = ' cec_muc ',
                   '_I_'   = ' il_muc ',
                   '_Q_'   = ' tet ')
for (code in names(pair_relabels)) {
  fin$pairs <- gsub(code, pair_relabels[[code]], fin$pairs)
}
# write.csv(fin, 'mothur_PERMANOVA_results.csv')
#within tissues
fin <- fin[grep('.* (.*) .* vs .* \\1 .*', fin$pairs),]
# Split into comparisons against the Control group vs all other comparisons.
to_conts <- fin[grep('Control', fin$pairs),]
not_conts <- fin[-grep('Control', fin$pairs),]
# Parse the tissue and the non-control treatment back out of the pair label.
to_conts$tissue <- gsub('D[0-9]+ (.*) ([A-Za-z_]+) vs D[0-9]+ .* ([A-Za-z]+)', '\\1', to_conts$pairs)
to_conts$treatment <- gsub('D[0-9]+ .* ([A-Za-z_]+) vs D[0-9]+ .* ([A-Za-z]+)', '\\2', to_conts$pairs)
# FDR-correct the high/low-shedder PERMANOVA p-values and keep labels only for
# comparisons significant at p.fdr < 0.05.
to_conts <- to_conts %>%
  mutate(p.fdr = round(p.adjust(p.value, method = 'fdr'), digits = 3),
         p.fdr.lab = ifelse(p.fdr < 0.05, p.fdr, NA))
# to_conts$treatment <- factor(to_conts$treatment, levels=c('RPS', 'Acid', 'ZnCu', 'RCS', 'Bglu'))
# Fecal time course: PERMANOVA pseudo-F of each shedding group vs control.
p1 <- to_conts %>%
  filter(tissue == 'feces') %>%
  ggplot(aes(x = day, y = F.Model, group = treatment, fill = treatment,
             color = treatment, label = p.fdr.lab)) +
  geom_line(size = 1.52) +
  geom_point(shape = 21) +
  geom_label(color = 'black') +
  scale_color_brewer(palette = 'Set1') +
  scale_fill_brewer(palette = 'Set1') +
  ggtitle('Community differences compared to control group over time', subtitle = 'RPS only') +
  labs(fill = 'Shedding', color = 'Shedding')
p1
# Necropsy tissues (everything except feces/tet): one bar per tissue x group,
# labelled with the significant FDR p-values.
to_conts %>%
  filter(!(tissue %in% c('feces', 'tet'))) %>%
  ggplot(aes(x = tissue, y = F.Model, fill = treatment)) +
  geom_col(position = 'dodge', color = 'black') +
  geom_text(aes(label = p.fdr.lab), position = position_dodge(width = 1), vjust = 1.5) +
  ggtitle('PERMANOVA F.stat. : Difference compared to controls across tissues',
          subtitle = 'Higher values represent a greater difference compared to control') +
  scale_fill_brewer(palette = 'Set1')
###
#HIGH LOW ORDINATE#
### need to dephyloseqize these objects before NMDS works
# Extract plain data.frames (rarefied OTU counts + sample metadata) because
# NMDS_ellipse() does not accept phyloseq objects directly.
HIGH_LOW_OTU <- rarefy_even_depth(FS12b_HL)@otu_table %>% data.frame()
HIGH_LOW_META <- FS12b_HL@sam_data %>% data.frame()
# NMDS_ellipse() returns a list; element [[1]] is the per-sample point table
# with group centroids (centroidX/centroidY) attached, grouped by the
# day_tissue_shed 'set' column.
HIGH_LOW_NMDS <- NMDS_ellipse(OTU_table=HIGH_LOW_OTU, metadata = HIGH_LOW_META, grouping_set = 'set')
# Fix legend/color order: high, low, Control.
HIGH_LOW_NMDS[[1]]$shed <- factor(HIGH_LOW_NMDS[[1]]$shed, levels = c('high', 'low', 'Control'))
# One NMDS panel per tissue/day: samples colored by shedding group, with spokes
# connecting each sample to its group centroid. Calling the helper at top level
# auto-prints the plot, just like the original piped ggplot expressions.
plot_shed_nmds <- function(tis, d, plot_title) {
  HIGH_LOW_NMDS[[1]] %>%
    filter(tissue == tis & day == d) %>%
    ggplot(aes(x = MDS1, y = MDS2, group = set, color = shed)) +
    geom_point(size = 3) +
    geom_segment(aes(xend = centroidX, yend = centroidY), alpha = 0.5) +
    scale_color_brewer(palette = 'Set1') +
    ggtitle(plot_title)
}
plot_shed_nmds('F', 'D0', 'Feces Day 0, RPS high/low & control')
plot_shed_nmds('F', 'D2', 'Feces Day 2, RPS high/low & control')
plot_shed_nmds('F', 'D7', 'Feces Day 7, RPS high/low & control')
plot_shed_nmds('F', 'D14', 'Feces Day 14, RPS high/low & control')
plot_shed_nmds('F', 'D21', 'Feces Day 21, RPS high/low & control')
plot_shed_nmds('X', 'D21', 'Feces Day 21, RPS high/low & control, Cecal mucosa')
plot_shed_nmds('C', 'D21', 'Feces Day 21, RPS high/low & control, Cecal contents')
# Ileal mucosa panel is captured (not printed) so ggplot_build() can inspect it.
p <- plot_shed_nmds('I', 'D21', 'Feces Day 21, RPS high/low & control, Ileal mucosa')
ggplot2::ggplot_build(p)
# ########### groups compared to their D0 #######
# I think I'll cut this...
# T0s <- times[grep('D0', times$pairs),]
#
# T0s$pairs <- gsub('X12b_', '', T0s$pairs)
# T0s$pairs <- gsub('_F_', ' feces ', T0s$pairs)
#
# #4DAF4A, #377EB8)
#
# #377EB8
#
# #E41A1C
#
#
#
# T0s$tissue <- gsub('D[0-9]+ ([A-Za-z_]+) [A-Za-z]+ vs D[0-9]+ [A-Za-z_]+ ([A-Za-z]+)', '\\1', T0s$pairs)
# T0s$treatment <- gsub('D[0-9]+ ([A-Za-z_]+) [A-Za-z]+ vs D[0-9]+ [A-Za-z_]+ ([A-Za-z]+)', '\\2', T0s$pairs)
#
# T0s$day <- gsub('D[0-9]+ ([A-Za-z_]+) [A-Za-z]+ vs (D[0-9]+) [A-Za-z_]+ ([A-Za-z]+)', '\\2', T0s$pairs)
#
# # what's this for??
# T0s[T0s$day == "D0",]$day <- gsub('(D[0-9]+) ([A-Za-z_]+) [A-Za-z]+ vs (D[0-9]+) [A-Za-z_]+ ([A-Za-z]+)', '\\1', T0s[T0s$day == "D0",]$pairs)
#
# T0s$day <- factor(gsub('D','',T0s$day), levels = c(2,7,14,21))
# T0s$pairs
#
# T0s$p.fdr <- round(p.adjust(T0s$p.value, 'fdr'),3)
# T0s$p.fdr.lab <- ifelse(T0s$p.fdr <0.05, T0s$p.fdr, NA)
# T0s$treatment <- factor(T0s$treatment, levels = c('Control', 'RPS', 'Acid', 'ZnCu', 'RCS', 'Bglu'))
#
#
# # this seems different.... investigate
#
# T0s %>% filter(tissue == 'feces') %>% ggplot(aes(x=day, y=F.Model, group=treatment, fill=treatment, color=treatment, label=p.fdr.lab)) +
# geom_line(size=1.52) + geom_point(shape=21) + scale_color_manual(values=c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) +
# geom_label(color='black') +
# scale_fill_manual(values=c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple'))+
# ggtitle("Community differences compared to each group's Day 0 conformation",
# subtitle = 'FDR corrected pvalues shown in boxes') + xlab('Day (vs Day 0)')
#
################################ RPS SPLIT #########################
# Quick look at the pig IDs before splitting RPS pigs into shedding phenotypes.
FS12b@sam_data$pignum
FS12_RPS <- subset_samples(FS12b, treatment == 'RPS')
# Order the day factor chronologically for modelling/plotting.
FS12_RPS@sam_data$day <- factor(FS12_RPS@sam_data$day, levels = c('D0', 'D2','D7', 'D14', 'D21'))
# Same low-shedder pig IDs as the FS12b_HL classification; all remaining RPS
# pigs are 'high'. Factor level order makes 'high' the DESeq2 reference level.
FS12_RPS@sam_data$shed <- ifelse(FS12_RPS@sam_data$pignum %in% c(373,321,181,392,97), 'low', 'high')
FS12_RPS@sam_data$shed <- factor(FS12_RPS@sam_data$shed, levels = c('high', 'low'))
# Append the shedding phenotype to the existing 'set' grouping label.
# NOTE(review): assumes sam_data$set was created earlier in the script -- confirm.
FS12_RPS@sam_data$set <- paste(FS12_RPS@sam_data$set, FS12_RPS@sam_data$shed, sep = '_')
#
# Keeping samples separate by day #
# Fecal day 0 (baseline): DESeq2 Wald test contrasting low vs high RPS shedders.
FS12b.glom <- FS12_RPS
FS12b.glom <- subset_samples(FS12b.glom, day =='D0' & tissue == 'F')
# Drop taxa with <= 1 total count in this subset before model fitting.
FS12b.glom <- prune_taxa(taxa_sums(FS12b.glom) > 1, FS12b.glom)
FS12.de <- phyloseq_to_deseq2(FS12b.glom, ~shed)
FS12.de <- DESeq(FS12.de, test = 'Wald', fitType = 'parametric')
# when is the resultsNames not returning what I expect now?
# shed_low_vs_high is what i expect but now 'shed1' is what is returned...
# ok now its fine, must have ran some code twice?
resultsNames(FS12.de)
# Manual check of the apeglm-shrunken results before using the plotting helper.
tmpres <- results(FS12.de, name = 'shed_low_vs_high', cooksCutoff = FALSE)
tmpres <- lfcShrink(FS12.de, res=tmpres, coef = 'shed_low_vs_high', type = 'apeglm')
tmpres[tmpres$padj < 0.1,]
# Deseq.quickplot returns [[1]] a plot and [[2]] the significant-OTU table.
D0_highlow <- Deseq.quickplot(DeSeq.object = FS12.de,
                              phyloseq.object = FS12b.glom, pvalue = .1, alpha = 0.1,
                              name = 'shed_low_vs_high' ,taxlabel = 'Genus', shrink_type = 'normal', cookscut = FALSE)
### D0 Q
#
# FS12b.glom <- FS12_RPS
# FS12b.glom <- subset_samples(FS12b.glom, day =='D0' & tissue == 'Q')
#
# FS12b.glom <- prune_taxa(taxa_sums(FS12b.glom) > 1, FS12b.glom)
#
# FS12.de <- phyloseq_to_deseq2(FS12b.glom, ~shed)
# library(DESeq2)
# FS12.de <- DESeq(FS12.de, test = 'Wald', fitType = 'parametric')
# resultsNames(FS12.de)
#
# D0_Q_highlow <- Deseq.quickplot(DeSeq.object = FS12.de,
# phyloseq.object = FS12b.glom, pvalue = .05, alpha = 0.05,
# name = 'shed_low_vs_high' ,taxlabel = 'Genus', shrink_type = 'normal', cookscut = FALSE)
#
##### D2
# Fecal day 2: same low-vs-high shedder Wald test as D0 above.
# NOTE(review): no tissue filter here -- presumably only fecal samples exist at
# D2/D7/D14; confirm against the sample metadata.
FS12b.glom <- FS12_RPS
FS12b.glom <- subset_samples(FS12b.glom, day =='D2')
FS12b.glom <- prune_taxa(taxa_sums(FS12b.glom) > 1, FS12b.glom)
FS12.de <- phyloseq_to_deseq2(FS12b.glom, ~shed)
FS12.de <- DESeq(FS12.de, test = 'Wald', fitType = 'parametric')
resultsNames(FS12.de)
D2_highlow <-Deseq.quickplot(DeSeq.object = FS12.de,
                             phyloseq.object = FS12b.glom, pvalue = .1, alpha = 0.1,
                             name = 'shed_low_vs_high' ,taxlabel = 'Genus', shrink_type = 'normal', cookscut = FALSE)
#### D7
# Same contrast at day 7.
FS12b.glom <- FS12_RPS
FS12b.glom <- subset_samples(FS12b.glom, day =='D7')
FS12b.glom <- prune_taxa(taxa_sums(FS12b.glom) > 1, FS12b.glom)
FS12.de <- phyloseq_to_deseq2(FS12b.glom, ~shed)
FS12.de <- DESeq(FS12.de, test = 'Wald', fitType = 'parametric')
resultsNames(FS12.de)
D7_highlow <-Deseq.quickplot(DeSeq.object = FS12.de,
                             phyloseq.object = FS12b.glom, pvalue = .1, alpha = 0.1,
                             name = 'shed_low_vs_high' ,taxlabel = 'Genus', shrink_type = 'normal', cookscut = FALSE)
# D14 #
# Same contrast at day 14.
FS12b.glom <- FS12_RPS
FS12b.glom <- subset_samples(FS12b.glom, day =='D14')
FS12b.glom <- prune_taxa(taxa_sums(FS12b.glom) > 1, FS12b.glom)
FS12.de <- phyloseq_to_deseq2(FS12b.glom, ~shed)
FS12.de <- DESeq(FS12.de, test = 'Wald', fitType = 'parametric')
resultsNames(FS12.de)
D14_highlow <- Deseq.quickplot(DeSeq.object = FS12.de,
                               phyloseq.object = FS12b.glom, pvalue = .1, alpha = 0.1,
                               name = 'shed_low_vs_high' ,taxlabel = 'Genus', shrink_type = 'normal', cookscut = FALSE)
##### D21 F
# Necropsy day 21, feces: low vs high shedder contrast.
FS12b.glom <- FS12_RPS
FS12b.glom <- subset_samples(FS12b.glom, day =='D21' & tissue == 'F')
FS12b.glom <- prune_taxa(taxa_sums(FS12b.glom) > 1, FS12b.glom)
FS12.de <- phyloseq_to_deseq2(FS12b.glom, ~shed)
FS12.de <- DESeq(FS12.de, test = 'Wald', fitType = 'parametric')
resultsNames(FS12.de)
D21F_highlow <- Deseq.quickplot(DeSeq.object = FS12.de,
                                phyloseq.object = FS12b.glom, pvalue = .1, alpha = 0.1,
                                name = 'shed_low_vs_high' ,taxlabel = 'Genus', shrink_type = 'normal', cookscut = FALSE)
#### Tissue X
# Day 21, cecal mucosa (tissue code X, per the pair-label relabelling above).
FS12b.glom <- FS12_RPS
FS12b.glom <- subset_samples(FS12b.glom, day =='D21' & tissue == 'X')
FS12b.glom <- prune_taxa(taxa_sums(FS12b.glom) > 1, FS12b.glom)
FS12.de <- phyloseq_to_deseq2(FS12b.glom, ~shed)
FS12.de <- DESeq(FS12.de, test = 'Wald', fitType = 'parametric')
resultsNames(FS12.de)
D21X_highlow <-Deseq.quickplot(DeSeq.object = FS12.de,
                               phyloseq.object = FS12b.glom, pvalue = .1, alpha = 0.1,
                               name = 'shed_low_vs_high' ,taxlabel = 'Genus', shrink_type = 'normal', cookscut = FALSE)
##### tissue C
# Day 21, cecal contents.
FS12b.glom <- FS12_RPS
FS12b.glom <- subset_samples(FS12b.glom, day =='D21' & tissue == 'C')
FS12b.glom <- prune_taxa(taxa_sums(FS12b.glom) > 1, FS12b.glom)
FS12.de <- phyloseq_to_deseq2(FS12b.glom, ~shed)
FS12.de <- DESeq(FS12.de, test = 'Wald', fitType = 'parametric')
resultsNames(FS12.de)
D21C_highlow <-Deseq.quickplot(DeSeq.object = FS12.de,
                               phyloseq.object = FS12b.glom, pvalue = .1, alpha = 0.1,
                               name = 'shed_low_vs_high' ,taxlabel = 'Genus', shrink_type = 'normal', cookscut = FALSE)
##### tissue I
# Day 21, ileal mucosa. Note: FS12b.glom is left holding this subset and is
# reused further down the script (tax table extraction).
FS12b.glom <- FS12_RPS
FS12b.glom <- subset_samples(FS12b.glom, day =='D21' & tissue == 'I')
FS12b.glom <- prune_taxa(taxa_sums(FS12b.glom) > 1, FS12b.glom)
FS12.de <- phyloseq_to_deseq2(FS12b.glom, ~shed)
FS12.de <- DESeq(FS12.de, test = 'Wald', fitType = 'parametric')
resultsNames(FS12.de)
D21I_highlow <- Deseq.quickplot(DeSeq.object = FS12.de,
                                phyloseq.object = FS12b.glom, pvalue = .1, alpha = 0.1,
                                name = 'shed_low_vs_high' ,taxlabel = 'Genus', shrink_type = 'normal', cookscut = FALSE)
#
# Tag each per-day/tissue result table with a readable label before merging.
D0_highlow[[2]]$set <- 'D0_feces'
D2_highlow[[2]]$set <- 'D2_feces'
D7_highlow[[2]]$set <- 'D7_feces'
D14_highlow[[2]]$set <- 'D14_feces'
D21F_highlow[[2]]$set <- 'D21_feces'
D21C_highlow[[2]]$set <- 'D21_cecal_content'
D21X_highlow[[2]]$set <- 'D21_cecal_mucosa'
D21I_highlow[[2]]$set <- 'D21_ileal_mucosa'
#
# Sanity check on the element type before bind_rows().
class(D0_highlow[[2]])
# Master table of all high-vs-low differential abundance results.
RPS_split_master <- bind_rows(list(D0_highlow[[2]],
                                   D2_highlow[[2]],
                                   D7_highlow[[2]],
                                   D14_highlow[[2]],
                                   D21F_highlow[[2]],
                                   D21C_highlow[[2]],
                                   D21X_highlow[[2]],
                                   D21I_highlow[[2]]))
# Flag results significant at padj <= 0.05; passed as `important=` to
# geom_text_sciname() in the plots below.
RPS_split_master$imp <- ifelse(RPS_split_master$padj <= 0.05, TRUE, FALSE)
# Chronological / anatomical facet order.
RPS_split_master$set <- factor(RPS_split_master$set, levels = c('D0_feces','D2_feces' ,'D7_feces', 'D14_feces', 'D21_feces', 'D21_cecal_content', 'D21_cecal_mucosa', 'D21_ileal_mucosa'))
# Pre-formatted p-value label for the plot text.
RPS_split_master <- RPS_split_master %>% mutate(newp2=paste0('p=', newp))
# devtools::install_github('jtrachsel/ggscinames')
library(ggscinames)
library(grid)
# Fecal timepoints: per-OTU log2 fold change (low vs high shedders), one facet per day.
RPS_split_master %>% filter(set %in% c('D0_feces' ,'D7_feces', 'D14_feces', 'D21_feces')) %>%
  ggplot(aes(x=reorder(OTU, log2FoldChange), y=log2FoldChange, fill=Treatment)) +
  geom_bar(stat='identity') +
  geom_text_sciname(aes(x=OTU, y=0, sci = Genus, nonsci=newp2, important=imp), size=3) + coord_flip() +
  facet_wrap(~set, ncol = 1, scales = 'free_y') + scale_fill_brewer(palette = 'Set1')
# Necropsy panels: D21 feces, cecal contents, cecal mucosa.
RPS_split_master %>% filter(set %in% c('D21_feces', 'D21_cecal_content', 'D21_cecal_mucosa')) %>%
  ggplot(aes(x=reorder(OTU, log2FoldChange), y=log2FoldChange, fill=Treatment)) +
  geom_bar(stat='identity') +
  geom_text_sciname(aes(x=OTU, y=0, sci = Genus, nonsci=newp2, important=imp), size=3) + coord_flip() +
  facet_wrap(~set, ncol = 1, scales = 'free_y') + scale_fill_brewer(palette = 'Set1') + xlab('') + labs(fill='Shedding')
# Ileal mucosa alone.
RPS_split_master %>% filter(set %in% c('D21_ileal_mucosa')) %>%
  ggplot(aes(x=reorder(OTU, log2FoldChange), y=log2FoldChange, fill=Treatment)) +
  geom_bar(stat='identity') +
  geom_text_sciname(aes(x=OTU, y=0, sci = Genus, nonsci=newp2, important=imp), size=3) + coord_flip() +
  facet_wrap(~set, ncol = 1, scales = 'free_y') + scale_fill_brewer(palette = 'Set1') + xlab('') + labs(fill='Shedding')
library(cowplot)
# Occurrence counts: number of day/tissue sets each OTU is significant in (padj <= 0.05).
p <- RPS_split_master %>%
  group_by(OTU, Treatment) %>%
  filter(padj <= 0.05) %>% tally() %>%
  ggplot(aes(x=OTU, y=n, fill=Treatment)) + geom_col() +
  scale_fill_brewer(palette = 'Pastel1') + ylab('occurences') + ggtitle('Number of times OTUs are significantly enriched (p<0.05)\n in either shedding phenotype') +
  theme(axis.text.x = element_text(angle = 90, vjust = .4)) + xlab('')
# NOTE(review): this second version has no padj filter, so it counts ALL
# occurrences even though the title still says p<0.05 -- probably needs either
# the filter restored or a corrected title.
RPS_split_master %>%
  group_by(OTU, Treatment) %>%
  tally() %>%
  ggplot(aes(x=OTU, y=n, fill=Treatment)) + geom_col() +
  scale_fill_brewer(palette = 'Pastel1') + ylab('occurences') + ggtitle('Number of times OTUs are significantly enriched (p<0.05)\n in either shedding phenotype') +
  theme(axis.text.x = element_text(angle = 90, vjust = .4)) + xlab('')
# Inspect the built plot object -- presumably to recover the fill hex codes
# echoed in the vector below (TODO confirm).
ggplot2::ggplot_build(p)
c("#E41A1C", "#FBB4AE", "#377EB8", "#B3CDE3")
# # MERGE THIS WITH TAX AND PRINT TABLE
# RPS_split_master %>%
#   group_by(OTU, Treatment) %>%
#   filter(padj <= 0.1) %>% tally()
# Two-tier significance coding: p < 0.05, p < 0.1, otherwise NA.
RPS_split_master <- RPS_split_master %>% mutate(p2=ifelse(padj <= 0.05, 'p < 0.05',
                                                          ifelse(padj <= 0.1, 'p < 0.1', NA)))
# Combine significance tier with shedding phenotype into one plotting factor.
RPS_split_master <- RPS_split_master %>% mutate(group=factor(paste(p2, Treatment), levels=c("p < 0.05 high",
                                                                                            "p < 0.1 high",
                                                                                            "p < 0.05 low",
                                                                                            "p < 0.1 low")))
# Colors pair strong/pale red for high and strong/pale blue for low,
# matching the hex codes extracted above.
RPS_split_master %>% group_by(OTU, group) %>% tally() %>%
  ggplot(aes(x=OTU, y=n, fill=group)) + geom_col(color='black') +
  scale_fill_manual(values = c("#E41A1C", "#FBB4AE", "#377EB8", "#B3CDE3")) +
  ylab('occurences') +
  ggtitle('Number of times OTUs are enriched \n in either RPS shedding phenotype') +
  theme(axis.text.x = element_text(angle = 90, vjust = .4)) + xlab('') + coord_flip()
# OTUs enriched in the same tier/phenotype in more than one day/tissue set.
int_OTUs <- RPS_split_master %>% group_by(OTU, group) %>% tally() %>% filter(n>1) %>% select(OTU) %>% unlist(use.names = FALSE)
# write_csv(RPS_split_ints, 'RPS_split_int_OTUs.csv')
RPS_split_ints <- RPS_split_master %>% filter(OTU %in% int_OTUs) %>%
  select(OTU, Treatment, Genus) %>% unique()
# NOTE(review): FS12b.glom still holds the last (D21 ileal mucosa) subset from
# the DESeq loop above, so this tax table covers only those taxa -- confirm intended.
tax <- as.data.frame(FS12b.glom@tax_table)
tax$OTU <- rownames(tax)
#############################################
# regular Differential abundance
# should make a function for this....
# it would take timepoint, tissue, and return the sig diff OTUs in a dataframe
# need to add tissue and timepoint to dataframe before return
# unique(FS12b@sam_data$pignum)
#
# FS12b@sam_data$treatment
# DESeq
# DESeq_difabund <- function(phyloseq, day, tissue, scientific = TRUE, shrink_type='normal',
# alpha=0.1, cooks_cut=FALSE, pAdjustMethod='BH'){
#
# # FS12b.glom <- tax_glom(FS12b, taxrank = 'Genus')
# FS12b.glom <- prune_samples(x = phyloseq, samples = phyloseq@sam_data$day == day & phyloseq@sam_data$tissue == tissue)
# FS12b.glom <- prune_taxa(taxa_sums(FS12b.glom) > 1, FS12b.glom)
# FS12.de <- phyloseq_to_deseq2(FS12b.glom, ~treatment)
# FS12.de <- DESeq(FS12.de, test = 'Wald', fitType = 'parametric')
#
# finres <- list()
# resind <- 1
# for (i in 2:length(resultsNames(FS12.de))){
# print(resultsNames(FS12.de)[i])
# treat <- sub('treatment_(.*)_vs_Control','\\1',resultsNames(FS12.de)[i])
# comp <- sub('treatment_', '', resultsNames(FS12.de)[i])
#
# # i dont think these two strategies for results calc are compatible....
# res <- results(object = FS12.de, name = resultsNames(FS12.de)[i], alpha=alpha, cooksCutoff = cooks_cut, pAdjustMethod = pAdjustMethod)
# res <- lfcShrink(FS12.de, coef = resultsNames(FS12.de)[i], type = shrink_type)
# sigtab = res[which(res$padj < alpha), ]
#
# if (nrow(sigtab) != 0){
# # browser()
# sigtab = cbind(as(sigtab, "data.frame"), as(tax_table(FS12b.glom)[rownames(sigtab), ], "matrix"))
# sigtab$newp <- format(round(sigtab$padj, digits = 3), scientific = scientific)
# sigtab$Treatment <- ifelse(sigtab$log2FoldChange >=0, treat, paste('down',treat, sep = '_'))
# sigtab$OTU <- rownames(sigtab)
# sigtab$tissue <- tissue
# sigtab$day <- day
# sigtab$comp <- comp
# finres[[resind]] <- sigtab
#
# resind <- resind + 1
# }
#
#
#
# }
#
# finres <- bind_rows(finres)
# return(finres)
#
# }
# something is afoot......
# apparently I removed the Q tissues.....
# Treatment-vs-control differential abundance (DESeq_difabund) for every
# day/tissue combination; each call returns the significant OTUs with taxonomy.
tocont <- list(DESeq_difabund(phyloseq = FS12b, day = 'D0', tissue = 'F', scientific = TRUE, shrink_type = 'apeglm',alpha = 0.05, cooks_cut = TRUE, pAdjustMethod = 'BH'),
               # DESeq_difabund(phyloseq = FS12b, day = 'D0', tissue = 'Q', scientific = TRUE, shrink_type = 'apeglm',alpha = 0.05, cooks_cut = TRUE, pAdjustMethod = 'BH'),
               DESeq_difabund(phyloseq = FS12b, day = 'D2', tissue = 'F', scientific = TRUE, shrink_type = 'apeglm',alpha = 0.05, cooks_cut = TRUE, pAdjustMethod = 'BH'),
               DESeq_difabund(phyloseq = FS12b, day = 'D7', tissue = 'F', scientific = TRUE, shrink_type = 'apeglm',alpha = 0.05, cooks_cut = TRUE, pAdjustMethod = 'BH'),
               DESeq_difabund(phyloseq = FS12b, day = 'D14', tissue = 'F', scientific = TRUE, shrink_type = 'apeglm',alpha = 0.05, cooks_cut = TRUE, pAdjustMethod = 'BH'),
               DESeq_difabund(phyloseq = FS12b, day = 'D21', tissue = 'F', scientific = TRUE, shrink_type = 'apeglm',alpha = 0.05, cooks_cut = TRUE, pAdjustMethod = 'BH'),
               DESeq_difabund(phyloseq = FS12b, day = 'D21', tissue = 'C', scientific = TRUE, shrink_type = 'apeglm',alpha = 0.05, cooks_cut = TRUE, pAdjustMethod = 'BH'),
               DESeq_difabund(phyloseq = FS12b, day = 'D21', tissue = 'X', scientific = TRUE, shrink_type = 'apeglm',alpha = 0.05, cooks_cut = TRUE, pAdjustMethod = 'BH'),
               DESeq_difabund(phyloseq = FS12b, day = 'D21', tissue = 'I', scientific = TRUE, shrink_type = 'apeglm',alpha = 0.05, cooks_cut = TRUE, pAdjustMethod = 'BH'))
tocont <- bind_rows(tocont)
# Keep only effect sizes with |log2FC| > 0.75.
tocontf <- tocont[abs(tocont$log2FoldChange) > .75,]
tocontf %>% ggplot(aes(x=OTU, y=log2FoldChange, fill=Treatment)) +
  geom_col(color='black') + coord_flip() + geom_hline(yintercept = 20, color='red', size=3)
#### ON TO SOMETHIGN HERE ####
# some variation of this figure for dif ab.
# maybe do one panel for fecal difabund
# one panel for tissue difabund
tocontf %>% ggplot(aes(x=Genus, y=log2FoldChange, color=Treatment, shape=tissue)) +
  geom_point() + coord_flip() + geom_hline(yintercept = 0, color='black', size=1) +
  facet_wrap(~day, scales = 'free')
###
# Fecal-only version, one facet column per day.
tocontf %>% filter(tissue == 'F') %>% ggplot(aes(x=Genus, y=log2FoldChange, color=Treatment)) +
  geom_point() + coord_flip() + geom_hline(yintercept = 0, color='black', size=1) +
  facet_wrap(~day, scales = 'free' ,ncol = 5)
# OTUs whose summed fold changes across all comparisons exceed 20.
biguns <- tocontf %>% group_by(OTU) %>% summarise(tot=sum(log2FoldChange)) %>% filter(tot >20) %>% select(OTU) %>% unlist()
tocont %>% filter(OTU %in% biguns) %>% select(OTU,Genus) %>% unique()
# OTUs recurring in more than two comparisons.
tocontf %>% group_by(OTU, Treatment) %>% tally() %>% filter(n>2) %>% as.data.frame()
# Count of differentially abundant OTUs per treatment comparison.
tocontf %>% group_by(comp) %>% tally() %>% as.data.frame() %>%
  ggplot(aes(x=comp, y=n)) + geom_col() + ggtitle('number of differentially abundant OTUs over the entire experiment')
#### Ok that wasnt so bad.
# Now, which OTUs changed at Salmonella infection?
# I think I need to add sum_sal info at beginning....
# c(c('D0', 'D2'))
#### log_sal as continuous covariate ####
# Scratch check of the one-sided formula construction used inside blarg() below.
formula(paste('~', 'log_sal'))
# FS12b@sam_data$day %in% c(day) & FS12b@sam_data$tissue == 'F'
##### SHOULD REALLY LOOK INTO INTERACTION WITH TREATMENT HERE!!!!!!!!
### OR SUBSET EACH TREATMENT
# Fit a DESeq2 model with a single continuous covariate (e.g. log_sal) on one
# day/tissue subset and return the OTUs whose abundance shows a significant
# (padj < 0.1) linear relationship with that covariate.
#
# phyloseq_obj: phyloseq object whose sample_data contains `covariate`.
# day:          day label(s) to keep (matched against sam_data$day).
# tissue:       single tissue code to keep.
# covariate:    name of a numeric sample_data column; also the DESeq2
#               coefficient name used for results()/lfcShrink().
# shrink_type:  lfcShrink() estimator (default 'apeglm').
#
# Returns list(plot, sigtab): [[1]] a flipped bar chart of shrunken log2 fold
# changes, [[2]] the taxonomy-annotated table of significant OTUs.
blarg <- function(phyloseq_obj, day, tissue, covariate, shrink_type='apeglm'){
  form <- formula(paste('~', covariate))
  FS12b.glom <- phyloseq_obj %>% prune_samples(samples = phyloseq_obj@sam_data$day %in% c(day) & phyloseq_obj@sam_data$tissue == tissue)
  # Drop taxa with <= 1 total count in this subset before model fitting.
  FS12b.glom <- prune_taxa(taxa_sums(FS12b.glom) > 1, FS12b.glom)
  FS12b.de <- phyloseq_to_deseq2(FS12b.glom, form)
  FS12b.de <- DESeq(FS12b.de, test = 'Wald', fitType = 'parametric')
  # BUGFIX: the original computed results(..., cooksCutoff = FALSE) and then
  # discarded them, letting lfcShrink() rebuild results with default Cook's
  # filtering. Passing `res` through keeps cooksCutoff = FALSE in effect while
  # still shrinking the fold changes.
  res <- results(FS12b.de, cooksCutoff = FALSE, name = covariate)
  res <- lfcShrink(FS12b.de, coef = covariate, res = res, type = shrink_type)
  # Keep testable OTUs significant at padj < 0.1 with a non-trivial effect size.
  res <- res[!is.na(res$padj),]
  res <- res[res$padj < 0.1,]
  sigtab <- res[abs(res$log2FoldChange) > .1 ,]
  # Attach taxonomy for the significant OTUs.
  sigtab = cbind(as(sigtab, "data.frame"), as(tax_table(phyloseq_obj)[rownames(sigtab), ], "matrix"))
  sigtab$newp <- format(round(sigtab$padj, digits = 3), scientific = TRUE)
  sigtab$OTU <- rownames(sigtab)
  # Direction of the linear association with the covariate, stored in a column
  # named after the covariate itself (used as the plot fill).
  sigtab[[covariate]] <- ifelse(sigtab$log2FoldChange >0 , 'increased', 'decreased')
  sigtab <- sigtab[order(sigtab$log2FoldChange),]
  sigtab$OTU <- factor(sigtab$OTU, levels = sigtab$OTU)  # lock bar order in the plot
  sigtab$day <- day
  sigtab$tissue <- tissue
  p <- sigtab %>% ggplot(aes_string(x='OTU', y='log2FoldChange', fill=covariate)) +
    geom_col(color='black') + coord_flip() + geom_text(aes(label=Genus, y=0))
  return(list(p, sigtab))
}
# Example run: cecal contents at necropsy (D21) against log Salmonella burden.
blarg(phyloseq_obj = FS12b, day = 'D21', tissue = 'C', covariate = 'log_sal')
# nnnn[['test']] <- 'TEST'
# blarg()
# across all treatments
# blarg(phyloseq_obj = FS12b, day = c('D2', 'D7', 'D14', 'D21'), tissue = 'F', covariate = 'log_sal')
# THESE ARE THE ONES AT DAY 2 THAT HAVE A LINEAR RELATIONSHIP WITH SALMONELLA
# LOG2FOLD CHANGE HERE MEANS whut?
# FS12b.glom <- prune_taxa(taxa_sums(FS12b.glom) > 2, FS12b.glom)
# FS12b %>% subset_samples(treatment =='RPS') %>% prune_taxa(taxa_sums() > 2)
# All treatments pooled: OTUs linearly related to log_sal at each fecal
# timepoint plus D21 cecal contents ([[2]] extracts the significance table).
global_sal_OTUs <- list(blarg(phyloseq_obj = FS12b, day = 'D2', tissue = 'F', covariate = 'log_sal')[[2]],
                        blarg(phyloseq_obj = FS12b, day = 'D7', tissue = 'F', covariate = 'log_sal')[[2]],
                        blarg(phyloseq_obj = FS12b, day = 'D14', tissue = 'F', covariate = 'log_sal')[[2]],
                        blarg(phyloseq_obj = FS12b, day = 'D21', tissue = 'F', covariate = 'log_sal')[[2]],
                        blarg(phyloseq_obj = FS12b, day = 'D21', tissue = 'C', covariate = 'log_sal')[[2]])
global_sal_OTUs <- bind_rows(global_sal_OTUs)
## this is the filtered OTUs that differ by treatment
tocontf
# These are the OTUs with some kind of linear relationship with log_sal in their tissue/timepoint
increase <- global_sal_OTUs %>% filter(log2FoldChange > 0) # OTUs associated with more salmonella
decrease <- global_sal_OTUs %>% filter(log2FoldChange < 0) # OTUs associated with less salmonella
# THESE l2fc values represent enrichment relative to control
treat_n_sal_increase <- tocontf[tocontf$OTU %in% increase$OTU,] # these are the OTUs that are associated with a treatment and also increased sal
treat_n_sal_decrease <- tocontf[tocontf$OTU %in% decrease$OTU,] # these are the OTUs that are associated with a treatment and also decreased sal
#
# treat_n_sal_increase <- treat_n_sal_increase %>% mutate(OTU_day_tis=paste(OTU,day, tissue, sep = '_'))
# treat_n_sal_decrease <- treat_n_sal_decrease %>% mutate(OTU_day_tis=paste(OTU,day, tissue, sep = '_'))
# ## THIS GETS TRICKY BECAUSE THE SPECIFIC DAY/TISSUE COMBINATION THESE OTUs ARE ASSOCIATED WITH SALMONELLA DONT NECESSARILY LINE UP WITH
# WHEN THEY ARE ENRICHED IN TREATMENTS....
# THESE l2fc values represent relationship with salmonella
# THESE TWO BLOCKS ONLY SHOW WHEN GLOBAL ASSOCIATION WITH SAL AND TREATMENT ENRICHMENT MATCH UP AT SAME TIMEPOINT/TISSUE
# NOTE(review): right_join() below has no `by=`, so it joins on all shared
# columns (presumably OTU, day, tissue -- confirm); OTU_day_tis only exists on
# the left side because the mutate() calls above are commented out. na.omit()
# then keeps only rows where the salmonella association and the treatment
# enrichment coincide at the same day AND tissue.
global_increase_treat_match <- increase[increase$OTU %in% tocontf$OTU,] %>%
  select(OTU, log2FoldChange, day, tissue) %>%
  mutate(OTU_day_tis=paste(OTU, day, tissue, sep='_'),
         sal_rel=log2FoldChange) %>%
  select(-log2FoldChange) %>%
  right_join(treat_n_sal_increase) %>% na.omit()# these are the OTUs that are associated with an increase in sal and also a treatment
global_decrease_treat_match <- decrease[decrease$OTU %in% tocontf$OTU,] %>%
  select(OTU, log2FoldChange, day, tissue) %>%
  mutate(OTU_day_tis=paste(OTU, day, tissue, sep='_'),
         sal_rel=log2FoldChange) %>%
  select(-log2FoldChange) %>%
  right_join(treat_n_sal_decrease) %>% na.omit()
# OTUs negatively associated with salmonella in more than one comparison.
big_glob_decrease_treatmatch <- global_decrease_treat_match %>% group_by(OTU) %>% tally() %>% filter(n>1) %>% select(OTU) %>% unlist()
# Combined view: bar height = strength of the salmonella association (sal_rel),
# fill = treatment the OTU is enriched in, outline color = day.
rbind(global_increase_treat_match, global_decrease_treat_match) %>%
  ggplot(aes(x=OTU, y=sal_rel, fill=Treatment, color=day)) +
  geom_col() +
  coord_flip() +
  geom_text(aes(y=0, x=OTU, label=Genus), color='black') +
  ylim(-7, 7) + ggtitle('OTUs with a linear relationship to log_sal \n and enriched in any one treatment')
################
# Scratch look at the treatment-matched negative-association rows.
decrease[decrease$OTU %in% tocontf$OTU,] %>% select(OTU, log2FoldChange, day, tissue)
global_sal_OTUs[!(global_sal_OTUs$OTU %in% tocontf$OTU),] # these are the OTUs that are not associated with a treatment but have an association with log_sal at some timepoint/tissue
# OTUs with a log_sal association in more than one timepoint/tissue but no
# treatment enrichment signal.
big_globs <- global_sal_OTUs[!(global_sal_OTUs$OTU %in% tocontf$OTU),] %>% group_by(OTU) %>% tally() %>% filter(n>1) %>% select(OTU) %>% unlist()
# Chronological fill order for the dodged bars below.
global_sal_OTUs$day <- factor(global_sal_OTUs$day, levels = c('D2', 'D7', 'D14', 'D21'))
# THIS ONE IS OTUS THAT HAVE SIG LIN RELATIONSHIP WITH LOG_SAL at more than 1 time
# no enrich in any treatment relative to control
# THIS ONE!
global_sal_OTUs %>% filter(OTU %in% big_globs) %>%
  ggplot(aes(x=OTU, y=log2FoldChange, fill=day)) +
  geom_hline(yintercept = 0) +
  geom_col(position = position_dodge2(preserve='single')) +
  geom_text_sciname(aes(x = OTU, y=0, sci=Genus), alpha=.5, size=5) +
  coord_flip() + ylim(-3,3) + ggtitle('OTUs with significant linear relationships with log_sal at more than 1 timepoint\n but not associated with any treatment',
                                      'Log2FoldChange is magnitude of association with salmonella')
# global_sal_OTUs %>% ggplot(aes(x=OTU, y=log2FoldChange, color))
# How often each OTU recurs with a positive / negative log_sal association.
increase %>% group_by(OTU) %>% tally() %>% filter(n>1)
decrease %>% group_by(OTU) %>% tally() %>% filter(n>1)
# blarg(phyloseq_obj = FS12b, day = 'D2', tissue = 'F', covariate = 'log_sal')[[2]]
# # LOOK FOR OTUs associated with treatment that seem to help salmonella status and OTUS associated with treatment that do not
# blarg(phyloseq_obj = FS12b, day = 'D7', tissue = 'F', covariate = 'log_sal')
# blarg(phyloseq_obj = FS12b, day = 'D14', tissue = 'F', covariate = 'log_sal')
# blarg(phyloseq_obj = FS12b, day = 'D21', tissue = 'F', covariate = 'log_sal')
#
# FS12b@sam_data$log_sal
#
# # blarg(phyloseq_obj = FS12b, day = 'D21', tissue = 'X', covariate = 'log_sal')
# blarg(phyloseq_obj = FS12b, day = 'D21', tissue = 'C', covariate = 'log_sal')
# # blarg(phyloseq_obj = FS12b, day = 'D21', tissue = 'I', covariate = 'log_sal')
#
# #
# these make less sense. THey look at linear relationships between OTUs and AULC
# blarg(phyloseq_obj = FS12b, day = 'D0', tissue = 'F', covariate = 'AULC')
# blarg(phyloseq_obj = FS12b, day = 'D2', tissue = 'F', covariate = 'AULC')
# blarg(phyloseq_obj = FS12b, day = 'D7', tissue = 'F', covariate = 'AULC')
# blarg(phyloseq_obj = FS12b, day = 'D14', tissue = 'F', covariate = 'AULC')
# blarg(phyloseq_obj = FS12b, day = 'D21', tissue = 'F', covariate = 'AULC')
#
# blarg(phyloseq_obj = FS12b, day = 'D21', tissue = 'X', covariate = 'AULC')
# blarg(phyloseq_obj = FS12b, day = 'D21', tissue = 'C', covariate = 'AULC')
# blarg(phyloseq_obj = FS12b, day = 'D21', tissue = 'I', covariate = 'AULC')
#
#
#### global VFA
# blarg(phyloseq_obj = FS12b, day = 'D21', tissue = 'C', covariate = 'butyrate')
# FS12b_vfa_prune <- prune_samples(x = FS12b , samples = !(FS12b@sam_data$pignum %in% c(6, 265,453, 458, 461, 469, 472)))
# blarg(phyloseq_obj = FS12b_vfa_prune, day = 'D21', tissue = 'C', covariate = 'butyrate')
# blarg(phyloseq_obj = FS12b_vfa_prune, day = 'D21', tissue = 'C', covariate = 'caproate')
# blarg(phyloseq_obj = FS12b_vfa_prune, day = 'D21', tissue = 'C', covariate = 'valerate')
#
# blarg(phyloseq_obj = FS12b_vfa_prune, day = 'D21', tissue = 'X', covariate = 'butyrate')
# blarg(phyloseq_obj = FS12b_vfa_prune, day = 'D21', tissue = 'X', covariate = 'caproate')
# blarg(phyloseq_obj = FS12b_vfa_prune, day = 'D21', tissue = 'X', covariate = 'valerate')
#
#
###
####MOVED FROM ABOVE ####
#### RPS ONLY BLARG ####
### BLARG BY TREATMENT ####
# Per-treatment subsets so the log_sal association can be tested within each
# group separately. Note this re-creates FS12_RPS without the shed/set columns
# added earlier.
FS12_RPS <- subset_samples(FS12b, treatment == 'RPS')
FS12_control <- subset_samples(FS12b, treatment == 'Control')
FS12_Acid <- subset_samples(FS12b, treatment == 'Acid')
FS12_RCS <- subset_samples(FS12b, treatment == 'RCS')
# FS12_RPS@sam_data$pignum
### CONTROL
# blarg(phyloseq_obj = FS12_control, day = 'D7',tissue = 'F', covariate = 'log_sal')
# Control pigs: day/tissue combos kept in the bind -- NOTE(review): presumably
# the combos where blarg() returned hits; the rest are commented out. Confirm.
control_blarg <- bind_rows(list(blarg(phyloseq_obj = FS12_control, day = 'D2',tissue = 'F', covariate = 'log_sal')[[2]],
                                blarg(phyloseq_obj = FS12_control, day = 'D14',tissue = 'F', covariate = 'log_sal')[[2]],
                                blarg(phyloseq_obj = FS12_control, day = 'D21',tissue = 'F', covariate = 'log_sal')[[2]],
                                blarg(phyloseq_obj = FS12_control, day = 'D21',tissue = 'X', covariate = 'log_sal')[[2]]))
# blarg(phyloseq_obj = FS12_control, day = 'D21',tissue = 'C', covariate = 'log_sal')
# blarg(phyloseq_obj = FS12_control, day = 'D21',tissue = 'I', covariate = 'log_sal')
control_blarg$treatment <- 'Control'
##### RPS
# Same pattern as the control group: keep only the day/tissue runs that
# produced usable results (D2/F, D7/F, D21/X).
RPS_blarg <- bind_rows(list(blarg(phyloseq_obj = FS12_RPS, day = 'D2',tissue = 'F', covariate = 'log_sal')[[2]],
                            blarg(phyloseq_obj = FS12_RPS, day = 'D7',tissue = 'F', covariate = 'log_sal')[[2]],
                            blarg(phyloseq_obj = FS12_RPS, day = 'D21',tissue = 'X', covariate = 'log_sal')[[2]]))
# blarg(phyloseq_obj = FS12_RPS, day = 'D14',tissue = 'F', covariate = 'log_sal')
# blarg(phyloseq_obj = FS12_RPS, day = 'D21',tissue = 'F', covariate = 'log_sal')
# blarg(phyloseq_obj = FS12_RPS, day = 'D21',tissue = 'C', covariate = 'log_sal')
# blarg(phyloseq_obj = FS12_RPS, day = 'D21',tissue = 'I', covariate = 'log_sal')
RPS_blarg$treatment <- 'RPS'
RPS_blarg
# tocontf[tocontf[grep('RPS', tocontf$comp),]
# Subset of tocontf (treatment-vs-control results, defined elsewhere in the
# file -- TODO confirm) whose comparison label mentions RPS.
tocontf_RPS <- tocontf[grep('RPS', tocontf$comp),]
##### ACID
# Acid group: only the D14/F and D21/X runs are kept.
# blarg(phyloseq_obj = FS12_Acid, day = 'D2',tissue = 'F', covariate = 'log_sal')
# blarg(phyloseq_obj = FS12_Acid, day = 'D7',tissue = 'F', covariate = 'log_sal')
acid_blarg <- bind_rows(list(blarg(phyloseq_obj = FS12_Acid, day = 'D14',tissue = 'F', covariate = 'log_sal')[[2]],
                             blarg(phyloseq_obj = FS12_Acid, day = 'D21',tissue = 'X', covariate = 'log_sal')[[2]]))
# blarg(phyloseq_obj = FS12_Acid, day = 'D21',tissue = 'F', covariate = 'log_sal')
# blarg(phyloseq_obj = FS12_Acid, day = 'D21',tissue = 'C', covariate = 'log_sal')
# blarg(phyloseq_obj = FS12_Acid, day = 'D21',tissue = 'I', covariate = 'log_sal')
acid_blarg$treatment <- 'Acid'
#### RCS
# RCS group: D2/F, D7/F and D21/C runs are kept.
RCS_blarg <- bind_rows(list(blarg(phyloseq_obj = FS12_RCS, day = 'D2',tissue = 'F', covariate = 'log_sal')[[2]],
                            blarg(phyloseq_obj = FS12_RCS, day = 'D7',tissue = 'F', covariate = 'log_sal')[[2]],
                            blarg(phyloseq_obj = FS12_RCS, day = 'D21',tissue = 'C', covariate = 'log_sal')[[2]]))
# blarg(phyloseq_obj = FS12_RCS, day = 'D21',tissue = 'X', covariate = 'log_sal')))
# blarg(phyloseq_obj = FS12_RCS, day = 'D14',tissue = 'F', covariate = 'log_sal')
# blarg(phyloseq_obj = FS12_RCS, day = 'D21',tissue = 'F', covariate = 'log_sal')
# blarg(phyloseq_obj = FS12_RCS, day = 'D21',tissue = 'I', covariate = 'log_sal')
RCS_blarg$treatment <- 'RCS'
# Combine the per-treatment results into one table.
master_blarg <- rbind(control_blarg, RPS_blarg, acid_blarg, RCS_blarg)
# OTUs that show up in more than one treatment/day/tissue combination.
treat_blarg_bigs <- master_blarg %>% group_by(OTU) %>% tally() %>% filter(n>1) %>% select(OTU) %>% unlist()
# Recurring fecal OTUs with a non-trivial effect size.
master_blarg %>% filter(OTU %in% treat_blarg_bigs & abs(log2FoldChange) > .25 & tissue == 'F') %>%
  ggplot(aes(x=OTU, y=log2FoldChange, fill=treatment)) +
  geom_col(color='black') + geom_text(aes(label=Genus, y=0), color='black') + coord_flip() +
  ggtitle('Fecal OTUs with linear relationships to Salmonella within treatment groups')
# Restrict to OTUs also present in the treatment-vs-control contrasts (tocontf).
master_blarg[master_blarg$OTU %in% tocontf$OTU,] %>% ggplot(aes(x=OTU, y=log2FoldChange, fill=treatment)) +
  geom_col(color='black') + geom_text(aes(label=Genus, y=0), color='black') + coord_flip() +
  ggtitle('OTUs with linear relationships to Salmonella within treatment groups \n and significant enrichment in one group relative to control',
          'LFC values represent relationship with salmonella')
# do this one except only include otus with negative lin rel to sal
# maybe scale size to mimick abs lin rel to sal?
# Inverse view: tocontf OTUs that also appear in master_blarg.
tocontf[tocontf$OTU %in% master_blarg$OTU,] %>% ggplot(aes(x=OTU, y=log2FoldChange, fill=Treatment)) +
  geom_point(color='black', shape=21) + geom_text(aes(label=Genus, y=0), color='black') + coord_flip() +# ylim(-20, 60) +
  ggtitle('OTUs significantly enriched treatment groups \nthat also have a significant linear relationship with salmonella',
          'LFC indicates enrichment relative to control')
# First list element of blarg() is used as a ggplot object here (it accepts
# `+ ggtitle(...)` below) -- TODO confirm against blarg()'s definition.
p1 <- blarg(phyloseq_obj = FS12_RPS, day = 'D2',tissue = 'F', covariate = 'log_sal')[[1]]
p2 <- blarg(phyloseq_obj = FS12_RPS, day = 'D7',tissue = 'F', covariate = 'log_sal')[[1]]
p3 <- blarg(phyloseq_obj = FS12_RPS, day = 'D21',tissue = 'X', covariate = 'log_sal')[[1]]
# p1 <- blarg(phyloseq_obj = FS12_RPS, day = 'D21',tissue = 'X', covariate = 'log_sal')[[1]]
# p1 <- blarg(phyloseq_obj = FS12_RPS, day = 'D21',tissue = 'X', covariate = 'log_sal')[[1]]
p1 + ggtitle('OTUs with linear relationship to salmonella \nRPS group, D2 Feces')
p2 + ggtitle('OTUs with linear relationship to salmonella \nRPS group, D7 Feces')
# THIS ONE IS V INTERESTING
p3 + ggtitle('OTUs with linear relationship to salmonella \nRPS group, D21 Cecal tissue')
##### I think this is now a repeat?
### THIS SECTION CALCULATES ALL THE OTUs IN THE RPS GROUP THAT HAVE A LINEAR ASSOCIATION WITH salmonella
# NEEDS blarg function defined below...
# in the case of the log_sal covariate these are matched 16S and salmonella culturing samples
# that is the log_sal is measured from the exact same tissue that the 16S data comes from
# in the case of AULC, the 16S samples are related back to the one AULC fecal shedding value calculated for each pig
D2_f_RPS_log_sal <- blarg(phyloseq_obj = FS12_RPS, day = 'D2',tissue = 'F', covariate = 'log_sal')
D7_f_RPS_log_sal <- blarg(phyloseq_obj = FS12_RPS, day = 'D7',tissue = 'F', covariate = 'log_sal')
#blarg(phyloseq_obj = FS12_RPS, day = 'D14',tissue = 'F', covariate = 'log_sal')
#blarg(phyloseq_obj = FS12_RPS, day = 'D21',tissue = 'F', covariate = 'log_sal')
# blarg(phyloseq_obj = FS12_RPS, day = 'D0',tissue = 'F', covariate = 'AULC')
# blarg(phyloseq_obj = FS12_RPS, day = 'D2',tissue = 'F', covariate = 'AULC')
# blarg(phyloseq_obj = FS12_RPS, day = 'D7',tissue = 'F', covariate = 'AULC')
# D14_f_RPS_AULC <- blarg(phyloseq_obj = FS12_RPS, day = 'D14',tissue = 'F', covariate = 'AULC')
# D21_f_RPS_AULC <- blarg(phyloseq_obj = FS12_RPS, day = 'D21',tissue = 'F', covariate = 'AULC')
D21_x_RPS_log_sal <- blarg(phyloseq_obj = FS12_RPS, day = 'D21', tissue='X', covariate = 'log_sal') # interesting....
# blarg(phyloseq_obj = FS12_RPS, day = 'D21', tissue='C', covariate = 'log_sal')
# blarg(phyloseq_obj = FS12_RPS, day = 'D21', tissue='I', covariate = 'log_sal')
# blarg(phyloseq_obj = FS12_RPS, day = 'D21', tissue='X', covariate = 'AULC')
# blarg(phyloseq_obj = FS12_RPS, day = 'D21', tissue='C', covariate = 'AULC')
# blarg(phyloseq_obj = FS12_RPS, day = 'D21', tissue='I', covariate = 'AULC')
# VFA covariates (butyrate/valerate/caproate) for the RPS group; results are
# only printed interactively, not assigned.
blarg(phyloseq_obj = FS12_RPS, day = 'D21', tissue='C', covariate = 'butyrate')
blarg(phyloseq_obj = FS12_RPS, day = 'D21', tissue='C', covariate = 'valerate')
blarg(phyloseq_obj = FS12_RPS, day = 'D21', tissue='C', covariate = 'caproate')
# Stray interactive check; `meta` is defined elsewhere in the file.
meta$butyrate
blarg(phyloseq_obj = FS12_RPS, day = 'D0', tissue='F', covariate = 'butyrate')
blarg(phyloseq_obj = FS12_RPS, day = 'D0', tissue='F', covariate = 'caproate')
blarg(phyloseq_obj = FS12_RPS, day = 'D0', tissue='F', covariate = 'valerate')
blarg(phyloseq_obj = FS12_RPS, day = 'D21', tissue='X', covariate = 'caproate')
#########
# Convert counts to per-sample relative abundance, then keep only taxa whose
# mean relative abundance exceeds 1e-5 (TRUE = prune the taxa from the object).
FS12b.glom = transform_sample_counts(FS12b, function(x) x / sum(x) )
FS12b.glom = filter_taxa(FS12b.glom, function(x) mean(x) > 1e-5, TRUE)
# PSMELT AND BOXPLOTS HERE!!!!!!!!!
# prune_taxa()
######### WARNING!!!!! CAREFUL HERE !!!!!!
# D14 doesnt work here because all RCS pigs have exactly the same shedding level at D14
# FS12b.glom <- FS12b %>% prune_samples(samples = FS12b@sam_data$day != 'D0' & FS12b@sam_data$tissue =='F')
#
#
# FS12b.glom <- prune_taxa(taxa_sums(FS12b.glom) > 5, FS12b.glom)
#
# FS12b.glom@sam_data$log_sal
#
# # FS12b.glom@sam_data$log_sal
#
# FS12b.de <- phyloseq_to_deseq2(FS12b.glom, ~log_sal)
# FS12b.de <- DESeq(FS12b.de, test = 'Wald', fitType = 'parametric')
#
# resultsNames(FS12b.de)
#
# # res <- results(FS12b.de, cooksCutoff = FALSE, name = 'log_sal')
# res <- lfcShrink(FS12b.de, coef = 'log_sal', type = 'apeglm')
#
# # resultsNames(FS12b.de)
#
# res <- res[!is.na(res$padj),]
# res <- res[res$padj < 0.05,]
# sigtab <- res[abs(res$log2FoldChange) > .1 ,]
# sigtab = cbind(as(sigtab, "data.frame"), as(tax_table(FS12b.glom)[rownames(sigtab), ], "matrix"))
# sigtab$newp <- format(round(sigtab$padj, digits = 3), scientific = TRUE)
# # sigtab$Treatment <- ifelse(sigtab$log2FoldChange >=0, treat, paste('down',treat, sep = '_'))
# sigtab$OTU <- rownames(sigtab)
# sigtab$salm <- ifelse(sigtab$log2FoldChange >0 , 'increased', 'decreased')
# sigtab <- sigtab[order(sigtab$log2FoldChange),]
# sigtab$OTU <- factor(sigtab$OTU, levels = sigtab$OTU)
#
#
# sigtab %>% ggplot(aes(x=OTU, y=log2FoldChange, fill=salm)) +
# geom_col(color='black') + coord_flip() + geom_text(aes(label=Genus, y=0))
#
### END WRAP ###
# sigtab$tissue <- tissue
# sigtab$day <- day
# sigtab$comp <- comp
# finres[[resind]] <- sigtab
# merge(FS12b@sam_data, sum_sal, by='pignum')
# below here might be on to something... LRT stuff
#ALL FECES
# D0 vs D2 within treatments
# D0 vs D7 within treatments
# D0 vs D14 within treatments
# D0 vs D21 within treatments
# Paired (within-pig) differential-abundance test over time in control fecal
# samples: full model ~pignum + day versus reduced ~pignum, so the LRT
# isolates the day effect while controlling for pig-to-pig variation.
unique(FS12b@sam_data$pignum)
FS12b@sam_data$day <- factor(FS12b@sam_data$day, levels = c('D0', 'D2', 'D7', 'D14', 'D21'))
FS12b@sam_data$pignum <- factor(FS12b@sam_data$pignum)
FS12b.glom <- prune_samples(x = FS12b, samples = FS12b@sam_data$treatment == 'Control' & FS12b@sam_data$tissue == 'F')
FS12b.glom <- prune_taxa(taxa_sums(FS12b.glom) > 1, FS12b.glom)
FS12.de <- phyloseq_to_deseq2(FS12b.glom, ~pignum + day)
FS12.de <- DESeq(FS12.de, test = 'LRT', reduced = ~ pignum)
resultsNames(FS12.de)
# Day-vs-baseline contrasts. Note: with test = 'LRT', the p-values returned by
# results() come from the likelihood ratio test for every contrast (per DESeq2
# docs), which is presumably why the rowname equality check below is expected
# to hold -- TODO confirm.
test2 <- results(object = FS12.de, name = 'day_D2_vs_D0')
test7 <- results(object = FS12.de, name = 'day_D7_vs_D0')
sigtab2 <- test2[which(test2$padj < 0.1),]
sigtab7 <- test7[which(test7$padj < 0.1),]
sigtab2$log2FoldChange
sigtab7$log2FoldChange
all(rownames(sigtab2) == rownames(sigtab7))
# Attach taxonomy to the significant D2 results and add bookkeeping columns
# matching the format produced elsewhere in the file.
sigtab2 = cbind(as(sigtab2, "data.frame"), as(tax_table(FS12b.glom)[rownames(sigtab2), ], "matrix"))
sigtab2$newp <- format(round(sigtab2$padj, digits = 3), scientific = TRUE)
sigtab2$Treatment <- ifelse(sigtab2$log2FoldChange >=0, 'Salmonella', 'Control')
sigtab2$OTU <- rownames(sigtab2)
sigtab2$tissue <- 'feces'
sigtab2$day <- 2
# sigtab$comp <- comp
sigtab2
### I THINK IM ON TO SOMETHING HERE.
### IDENTIFY IMPORTANT OTUS THAT SEEM TO CHANGE WITH SAL AND THEN PLOT TIME COURSE INFO
### CAN DO BY TREATMENT BUT ALSO CAN DO HIGH LOW SHEDDER SPLIT
######## SUM SAL DF ########
# ################### pig trips ##############
#
#
# min(rowSums(FS12b@otu_table))
#
#
# test <- data.frame(FS12b@otu_table)
# rownames(test)
# rowSums(test)
#
# FS12.otu.rare <- rrarefy(test, min(rowSums(test)))
#
#
#
# bray.dist <- vegdist(FS12.otu.rare, method = 'jaccard', binary = FALSE)
#
# FS12b_meta$sample_ID == rownames(FS12.otu.rare)
#
# #FS12b_meta <- data.frame(FS12b@sam_data)
#
#
#
# #FS12b_meta$
#
# FS12b_meta$shan2 <- diversity(FS12b@otu_table, base = 2)
# # FS12b_meta$shan <- diversity(FS12b@otu_table)
# # FS12b_meta$invsimp <- diversity(FS12b@otu_table, index = 'invsimpson')
#
# #FS12b_meta$day <- factor(FS12b_meta$day, levels = c('D0', 'D2', 'D7', 'D14', 'D21'))
# #FS12b_meta$treatment <- factor(FS12b_meta$treatment, levels = c('control', 'RPS', 'Acid', 'ZnCu', 'RCS', 'Bglu'))
#
# # FS12b_meta %>% filter(tissue == 'F') %>%
# # ggplot(aes(x=day, y=shan2, group=set, fill=treatment)) +
# # geom_boxplot() + scale_fill_manual(values=c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) + geom_text(aes(label=pignum))
#
# # FS12b_meta %>% filter(tissue == 'F') %>%
# # ggplot(aes(x=day, y=invsimp, group=set, fill=treatment)) +
# # geom_boxplot() + scale_fill_manual(values=c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) + geom_text(aes(label=pignum))
# #
#
#
#
# # FS12b_meta %>% filter(day == 'D21') %>% ggplot(aes(x=tissue, y=shan2, group=set, fill=treatment)) + geom_boxplot() + geom_text(aes(label=pignum))
#
#
# # FS12b_meta %>% filter(tissue == 'F') %>% ggplot(aes(x=day, y=shan, group=set, fill=treatment)) + geom_boxplot()
# # FS12b_meta %>% filter(day == 'D21') %>% ggplot(aes(x=tissue, y=shan, group=set, fill=treatment)) + geom_boxplot()
#
#
#
#
# #ggplot(FS12b_meta, aes(x=treatment, y=shan, group=set)) + geom_boxplot()
#
# # min(rowSums(shareds_test))
# # hist(rowSums(shareds_test))
# # sort(rowSums(shareds_test))
#
# dist.data <- as.data.frame(as.matrix(bray.dist))
# dist.data$from <- rownames(dist.data)
#
# dist.gather <- gather(data = dist.data, key = 'to', value = 'distance', -from)
#
# #
#
#
# dist.gather$fromPig <- gsub('([X12]+[ab]?)([NP]+)([0-9]+)([dWXDi]+)([0-9]+)([A-Z]?)', '\\3', dist.gather$from)
#
# #
#
# dist.gather$FT <- paste(dist.gather$from, dist.gather$to, sep = ' ')
#
#
#
# #dist.gather$TF <- paste(dist.gather$to, dist.gather$from, sep = ' ')
#
# ######
# # all pig pairwise #
#
# total_ground_covered <- dist.gather[grep('X12bP([0-9]+)D[0-9]+F X12bP\\1D[0-9]+F', dist.gather$FT),] %>% group_by(fromPig) %>% summarise(allpw=sum(distance),
# num=n())
#
# rooms <- read.csv('./data/Rooms.csv')
# total_ground_covered$treatment <- ifelse(total_ground_covered$fromPig %in% rooms$X6, 'control',
# ifelse(total_ground_covered$fromPig %in% rooms$X7, 'RPS',
# ifelse(total_ground_covered$fromPig %in% rooms$X8, 'Acid',
# ifelse(total_ground_covered$fromPig %in% rooms$X9, 'Zn+Cu',
# ifelse(total_ground_covered$fromPig %in% rooms$X10, 'RCS',
# ifelse(total_ground_covered$fromPig %in% rooms$X11, 'Bglu', 'asdfsa'))))))
#
#
#
#
# sum_sal$fromPig <- sum_sal$pignum
# total_ground_covered$fromPig
#
# total_ground_covered <- total_ground_covered %>% filter(num == 25)
#
# boxplot(total_ground_covered$allpw~total_ground_covered$treatment)
#
# sum_sal
# total_ground_covered <- merge(total_ground_covered, sum_sal, by = 'fromPig')
#
# ############### NEED TO READ IN SUM_SAL ################
# ########################################################
#
# total_ground_covered$treatment.y == total_ground_covered$treatment.x
# total_ground_covered <- total_ground_covered %>% mutate(treatment=treatment.x) %>% select(-treatment.x, -treatment.y)
#
# #cor.test(total_ground_covered$allpw, total_ground_covered$AULC)
#
#
# total_ground_covered %>% group_by(treatment) %>% summarise(AULCvTRIP_P=cor.test(AULC, allpw, method = 'pearson')$p.value,
# AULCvTRIP_T=cor.test(AULC, allpw, method = 'pearson')$statistic)
#
#
#
# total_ground_covered %>%
# ggplot(aes(x=allpw, y=AULC, fill=treatment, color=treatment)) +
# geom_point(size=2, shape=21) + geom_smooth(method = 'lm', se=FALSE) +
# ggtitle('Correlation between cumulative community membership change and cumulative shedding',
# subtitle = 'correlation stats: RPS pval = 0.02, control pval = 0.31, Bglu pval = 0.42') +
# xlab('Cumulative Bray-Curtis distance (presence/abscence)')
#
#
#
#
#
# #
# ######
# D0_2 <- dist.gather[grep('X12bP([0-9]+)D0F X12bP\\1D2F', dist.gather$FT),]
# #colnames(D0_2)[1] <- 'sample_ID'
# colnames(D0_2)[3] <- 'D0_2'
# D0_2 <- D0_2[,c(3,4)]
#
# D2_7 <- dist.gather[grep('X12bP([0-9]+)D2F X12bP\\1D7F', dist.gather$FT),]
# #colnames(D2_7)[1] <- 'sample_ID'
# colnames(D2_7)[3] <- 'D2_7'
# D2_7 <- D2_7[,c(3,4)]
#
# D7_14 <- dist.gather[grep('X12bP([0-9]+)D7F X12bP\\1D14F', dist.gather$FT),]
# #colnames(D7_14)[1] <- 'sample_ID'
# colnames(D7_14)[3] <- 'D7_14'
# D7_14 <- D7_14[,c(3,4)]
#
# D14_21 <- dist.gather[grep('X12bP([0-9]+)D14F X12bP\\1D21F', dist.gather$FT),]
# #colnames(D14_21)[1] <- 'sample_ID'
# colnames(D14_21)[3] <- 'D14_21'
# D14_21 <- D14_21[,c(3,4)]
#
# D0_21 <- dist.gather[grep('X12bP([0-9]+)D0F X12bP\\1D21F', dist.gather$FT),]
# #colnames(D14_21)[1] <- 'sample_ID'
# colnames(D0_21)[3] <- 'D0_21'
# D0_21 <- D0_21[,c(3,4)]
#
#
# #full_join(D0_2, D2_7)
# pig_trips <- merge(D0_2, D2_7, all = TRUE, by = 'fromPig')
# pig_trips <- merge(pig_trips, D7_14, all = TRUE, by = 'fromPig')
# pig_trips <- merge(pig_trips, D14_21, all = TRUE, by = 'fromPig')
# pig_trips <- merge(pig_trips, D0_21, all = TRUE, by = 'fromPig')
#
# #rowSums(pig_trips)
# #pig_trips <- na.omit(pig_trips)
# colnames(pig_trips[,c(2:5)])
# pig_trips$trip <- rowSums(pig_trips[,c(2:5)])
# hist(pig_trips$trip, breaks = 10)
#
#
#
# rooms <- read.csv('../FS12/Rooms.csv')
#
# # add treatment data. This probably isn't the best way to do this...
#
# #colnames(sum_sal)[1] <- 'fromPig'
#
# library(funfuns)
#
# #NMDS_ellipse(metadata = meta_test, OTU_table = shareds_test, grouping_set = 'pig_pen')
# ###############################
#
# # pig_trips$treatment <- ifelse(pig_trips$fromPig %in% rooms$X6, 'control',
# # ifelse(pig_trips$fromPig %in% rooms$X7, 'RPS',
# # ifelse(pig_trips$fromPig %in% rooms$X8, 'Acid',
# # ifelse(pig_trips$fromPig %in% rooms$X9, 'Zn+Cu',
# # ifelse(pig_trips$fromPig %in% rooms$X10, 'RCS',
# # ifelse(pig_trips$fromPig %in% rooms$X11, 'Bglu', 'asdfsa'))))))
# #
# #
#
# pig_trips <- merge(pig_trips, sum_sal, by = 'fromPig')
#
# boxplot(pig_trips$trip~pig_trips$treatment)
# boxplot(pig_trips$D0_21~pig_trips$treatment)
#
# pairwise.wilcox.test(x=pig_trips$trip, g=pig_trips$treatment, p.adjust.method = 'none')
#
# #colnames(sum_sal)[1] <- 'fromPig'
# pig_trips %>% filter(treatment == "RPS") %>% ggplot(aes(x=trip, y=AULC, color=treatment)) + geom_point() + geom_smooth(method = 'lm')
#
# pig_trips %>% filter(treatment == "control") %>% ggplot(aes(x=trip, y=AULC, color=treatment)) + geom_point() + geom_smooth(method = 'lm')
#
# pig_trips %>%
# ggplot(aes(x=treatment, y=trip, fill=treatment)) +
# geom_boxplot() +
# geom_jitter(shape=21, color='black', stroke=1.2, size=2, width = .2) +
# scale_fill_manual(values = c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) +
# ggtitle('Cumulative change in each individual pigs community stucture over 21 days') + ylab("Cumulative Jaccard distance")
#
#
#
#
# pig_trips_cor <- pig_trips %>% group_by(treatment) %>% summarise(AULCvTRIP_P=cor.test(AULC, trip)$p.value,
# AULCvTRIP_T=cor.test(AULC, trip)$statistic,
# AULCv02_P=cor.test(AULC, D0_2)$p.value,
# AULCv02_T=cor.test(AULC, D0_2)$statistic,
# AULCv27_P=cor.test(AULC, D2_7)$p.value,
# AULCv27_T=cor.test(AULC, D2_7)$statistic,
# AULCv714_P=cor.test(AULC, D7_14)$p.value,
# AULCv714_T=cor.test(AULC, D7_14)$statistic,
# AULCv1421_P=cor.test(AULC, D14_21)$p.value,
# AULCv1421_T=cor.test(AULC, D14_21)$statistic)
#
#
#
# # pig_trips %>% group_by(treatment) %>% summarise(AULCvTRIP_P=cor.test(AULC, D0_21)$p.value,
# # AULCvTRIP_T=cor.test(AULC, D0_21)$statistic)
#
#
# pig_trips %>% filter(treatment == "control") %>% ggplot(aes(x=trip, y=AULC, color=treatment)) + geom_point() + geom_smooth(method = 'lm')
# pig_trips %>% filter(treatment == "RPS") %>% ggplot(aes(x=trip, y=AULC, color=treatment)) + geom_point() + geom_smooth(method = 'lm')
# pig_trips %>% filter(treatment == "Bglu") %>% ggplot(aes(x=trip, y=AULC, color=treatment)) + geom_point() + geom_smooth(method = 'lm')
# pig_trips %>% filter(treatment == "Zn+Cu") %>% ggplot(aes(x=trip, y=AULC, color=treatment)) + geom_point() + geom_smooth(method = 'lm')
#
# pig_trips %>%
# ggplot(aes(x=trip, y=AULC, fill=treatment, color=treatment)) +
# geom_point(size=3, shape=21, color='black') + geom_smooth(method = 'lm', se=FALSE, size=2) +
# ggtitle('Correlation between cumulative community change and cumulative shedding',
# subtitle = 'correlation stats: RPS pval = 0.037, control pval = 0.09, Bglu pval = 0.08') +
# xlab('Cumulative Jaccard distance') + scale_color_manual(values = c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')) +
# scale_fill_manual(values = c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple'))
#
#
# c('#3399FF', 'orange', 'red', 'grey', 'purple')
# c('#33CC33', '#3399FF', 'orange', 'red', 'grey', 'purple')
#
# #testse <- cor.test(pig_trips$trip, pig_trips$AULC)
# #testse$p.value
# #testse$statistic
#
#
# ggplot(pig_trips, aes(x=trip, y=AULC, color=treatment)) + geom_point() + geom_smooth(method = 'lm')
# ggplot(pig_trips, aes(x=D0_2, y=AULC, color=treatment)) + geom_point() + geom_smooth(method = 'lm')
# ggplot(pig_trips, aes(x=D2_7, y=AULC, color=treatment)) + geom_point() + geom_smooth(method = 'lm')
# ggplot(pig_trips, aes(x=D7_14, y=AULC, color=treatment)) + geom_point() + geom_smooth(method = 'lm')
# ggplot(pig_trips, aes(x=D14_21, y=AULC, color=treatment)) + geom_point() + geom_smooth(method = 'lm')
#
# ggplot(pig_trips_test, aes(x=mean_trip, y=AULC, color=treatment)) + geom_point() + geom_smooth(method = 'lm', fill = NA) + geom_text(aes(label=pignum))
#
# #ggplot(pig_trips, aes(x=sum, y=AULC, color=treatment)) + geom_point() + geom_smooth(method = 'lm')
#
# pig_trips %>% group_by(treatment) %>% summarise(num=n())
#
# apply(X = pig_trips, MARGIN = 2, FUN = mean, na.rm=TRUE)
#
# mean(pig_trips$D0_2, na.rm=TRUE)
# mean(pig_trips$D2_7, na.rm=TRUE)
# mean(pig_trips$D7_14, na.rm=TRUE)
# mean(pig_trips$D14_21, na.rm=TRUE)
#
# median(pig_trips$D0_2, na.rm=TRUE)
# median(pig_trips$D2_7, na.rm=TRUE)
# median(pig_trips$D7_14, na.rm=TRUE)
# median(pig_trips$D14_21, na.rm=TRUE)
#
#
# # looking for missing samples
#
#
# sum(shared_table[grep('P50D0', rownames(shared_table)),])
# sum(shared_table[grep('P181D7F', rownames(shared_table)),])
#
#
#
# ggplot(pig_trips, aes(x=treatment, y=trip, fill=treatment)) +
# geom_boxplot() + ylab('Cumulative bray-curtis dissimilarity (each pig)') + geom_jitter(size=2.5,width = 0.2, shape=21)+
# ggtitle('Cumulative change in community structure through Salmonella infection')
#
########### cor stuff ###########
# D0 correlations
# res.all is defined elsewhere in the file (fecal VFA results, judging by the
# fec_VFAs name -- TODO confirm).
fec_VFAs <- res.all
# Baseline (time == 0) rows, with `time` renamed to `day` and moved first.
fec_VFAs_0 <- fec_VFAs %>% filter(time == 0) %>% mutate(day=time) %>% select(day, everything(),-time)
# Nest metadata by day; each ttttt$data[[i]] is that day's data frame.
ttttt <- FS12b_meta %>% group_by(day) %>% nest()
FS12b_meta %>% group_by(day) %>% nest()
colnames(ttttt$data[[1]])
# Rename all columns of a (nested) data frame to the supplied label(s).
#
# BUG FIX: the original referenced a free variable `day` that was never an
# argument (so the call errored unless `day` happened to exist in the caller's
# environment) and returned only the new names instead of the renamed data
# frame. `day` is now an explicit argument; the NULL default (leave names
# unchanged) keeps any existing single-argument calls working.
#
# df  : data frame whose columns should be renamed.
# day : character (or coercible) vector of new column names; NULL = no change.
# Returns the data frame with updated column names.
col_nams_map <- function(df, day = NULL){
  if (!is.null(day)) {
    colnames(df) <- paste(day)
  }
  df
}
# NOTE(review): a bare `map()` call with no arguments followed here; it always
# errored and was removed. Intended usage was presumably something like
# map(ttttt$data, col_nams_map) -- TODO confirm.
# Run pairwise Wilcoxon tests of dispersion distances between treatment
# groups and flatten the p-value matrix into a one-row data frame whose
# column names encode each comparison as "<column>_vs_<row>".
#
# df : data frame with numeric `dispers.distances` and grouping `treatment`.
# Returns a single-row data.frame of unadjusted p-values.
get_pairs <- function(df){
  wt <- pairwise.wilcox.test(df$dispers.distances, df$treatment,
                             p.adjust.method = 'none')
  pmat <- wt$p.value
  # Flatten column-major, matching the layout of the p-value matrix.
  flat <- data.frame(matrix(as.vector(pmat), nrow = 1))
  names(flat) <- paste(rep(colnames(pmat), each = nrow(pmat)),
                       rownames(pmat), sep = "_vs_")
  flat
}
# Per-day pairwise Wilcoxon tests (via get_pairs) of dispersion distances in
# fecal samples, keeping only the comparisons against the control group.
shan_fecal_tests <- FS12b_meta %>% filter(tissue =='F') %>% group_by(day) %>%
  nest() %>% mutate(pps = map(data, get_pairs)) %>%
  select(day, pps) %>% unnest() %>% select(day, starts_with('control'))
########## MISSING DATA?? #########
# Count fecal samples per pig per day (wide by day) and attach treatment, to
# spot pigs with missing samples.
tttt <- FS12b_meta %>%filter(tissue =='F') %>% group_by(pignum, day) %>% tally() %>% spread(key = day, value = n)
tttt <- FS12b_meta %>% select(pignum, treatment) %>% unique() %>% left_join(tttt, by = 'pignum')
# Exploratory scatterplots: does a pig's community shift in one sampling
# interval relate to its shift in another? The D0_2 ... D14_21 columns of
# pig_trips are per-interval distances (built in the commented-out section
# above -- TODO confirm pig_trips is populated before this point).
pig_trips %>% ggplot(aes(x=D0_2, y = D2_7)) + geom_point(aes(color = treatment),size=3) + geom_point()
pig_trips %>% ggplot(aes(x=D0_2, y = D7_14)) + geom_point(aes(color = treatment),size=3)
pig_trips %>% ggplot(aes(x=D0_2, y = D14_21)) + geom_point(aes(color = treatment),size=3)
pig_trips %>% ggplot(aes(x=D2_7, y = D0_2)) + geom_point(aes(color = treatment),size=3)
pig_trips %>% ggplot(aes(x=D2_7, y = D7_14)) + geom_point(aes(color = treatment),size=3) + geom_smooth(method = 'lm')
pig_trips %>% ggplot(aes(x=D2_7, y = D14_21)) + geom_point(aes(color = treatment),size=3)
pig_trips %>% ggplot(aes(x=D7_14, y = D0_2)) + geom_point(aes(color = treatment),size=3)
pig_trips %>% ggplot(aes(x=D7_14, y = D2_7)) + geom_point(aes(color = treatment),size=3)
pig_trips %>% ggplot(aes(x=D7_14, y = D14_21)) + geom_point(aes(color = treatment),size=3)
pig_trips %>% ggplot(aes(x=D14_21, y = D0_2)) + geom_point(aes(color = treatment),size=3)
pig_trips %>% ggplot(aes(x=D14_21, y = D2_7)) + geom_point(aes(color = treatment),size=3)
pig_trips %>% ggplot(aes(x=D14_21, y = D7_14)) + geom_point(aes(color = treatment),size=3)
# Flag pigs with known missing samples -- TODO confirm this pig list against
# the sample inventory.
pig_trips$missing <- ifelse(pig_trips$pignum %in% c(50,181,211,240,253,469), TRUE, FALSE)
# Mean per-interval distance for each pig; columns c(1:5, 7) are presumably
# fromPig plus the interval distances -- TODO confirm column positions.
pig_trips_test <- pig_trips[,c(1:5, 7)]
PTgath <- pig_trips_test %>% gather(key = interval, value = distance, -fromPig)
avtrp <- PTgath %>% group_by(fromPig) %>% summarise(mean_trip = mean(distance, na.rm = TRUE))
pig_trips_test <- merge(pig_trips, avtrp, by = 'fromPig')
# Cumulative trip distance vs mean interval distance.
pig_trips_test %>% ggplot(aes(x=trip, y=mean_trip)) + geom_point()
#########
# NOTE(review): two bare `phyloseq::transform_sample_counts()` calls (no
# arguments) stood here. transform_sample_counts() requires a phyloseq object
# and a transform function, so both calls always errored; they have been
# removed. The relative-abundance conversion for this section is done manually
# below via division by rowSums().
# Drop taxa with total counts <= 1 in the RPS subset.
FS12_RPS <- subset_taxa(FS12_RPS, taxa_sums(FS12_RPS) > 1)
# plot_bar(FS12_RPS, x='shed')
FS12_RPS_sam <- as_data_frame(FS12_RPS@sam_data)
# Sample count per pig/tissue, to check for gaps.
wht <- FS12_RPS_sam %>% group_by(pignum, tissue) %>% tally()
# missing 50 and 181 fecals
FS12_RPS_otu <- as.data.frame(FS12_RPS@otu_table)
FS12_RPS_otu <- FS12_RPS_otu/rowSums(FS12_RPS_otu) # transforms to relative abundance
FS12_RPS_tax <- as.data.frame(FS12_RPS@tax_table)
FS12_RPS_tax$OTU <- rownames(FS12_RPS_tax)
#
colSums(FS12_RPS_otu)
# Mean relative abundance per OTU in pig 97's samples vs all other samples.
# NOTE(review): grep(97, ...) matches the substring '97' anywhere in the
# sample ID; if any other pig number contains '97' those samples would be
# misassigned -- TODO confirm pig numbering.
colsums97 <- colSums(FS12_RPS_otu[grep(97, rownames(FS12_RPS_otu)),])/nrow(FS12_RPS_otu[grep(97, rownames(FS12_RPS_otu)),])
colsums_others <- colSums(FS12_RPS_otu[grep(97, rownames(FS12_RPS_otu), invert=TRUE),])/nrow(FS12_RPS_otu[grep(97, rownames(FS12_RPS_otu), invert=TRUE),])
# Logical masks: OTUs lower/higher in pig 97 than in the rest of the group.
lowerin97 <- (colsums97 - colsums_others) < 0
higherin97 <- (colsums97 - colsums_others) > 0
#
# Join sample metadata to relative abundances, then melt to long format for
# plotting (OTU columns are everything after the sample_ID:shed metadata).
FS12_RPS_otu$sample_ID <- rownames(FS12_RPS_otu)
FS12_RPS_all <- merge(FS12_RPS_sam, FS12_RPS_otu, by='sample_ID')
FS12_RPS_all[1:10, 1:10]
FS12_gath <- FS12_RPS_all %>% gather(key=OTU, value=relabund, -(sample_ID:shed))
FS12_RPS_tax
FS12_gath %>% ggplot(aes(x=pignum, y=relabund)) + geom_col()
|
ebab9efe0de3f618818cb6aed3badcec13a81d9e
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/anomaly/man/show-methods.Rd
|
2d9a8053ab499f06499c7c54dcb66b986fd9469b
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,393
|
rd
|
show-methods.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bard.R, R/capa.R, R/capa.mv.R, R/capa.uv.R,
% R/pass.class.R
\docType{methods}
\name{show}
\alias{show}
\alias{show,bard.class-method}
\alias{show,bard.sampler.class-method}
\alias{show,capa.class-method}
\alias{show,capa.mv.class-method}
\alias{show,capa.uv.class-method}
\alias{show,pass.class-method}
\title{Displays S4 objects produced by capa methods.}
\usage{
\S4method{show}{bard.class}(object)
\S4method{show}{capa.class}(object)
\S4method{show}{capa.mv.class}(object)
\S4method{show}{capa.uv.class}(object)
\S4method{show}{pass.class}(object)
}
\arguments{
\item{object}{An instance of an S4 class produced by \code{\link{capa}}, \code{\link{capa.uv}}, \code{\link{capa.mv}}, \code{\link{pass}}, \code{\link{bard}}, or \code{\link{sampler}}.}
}
\description{
Displays S4 object produced by \code{\link{capa}}, \code{\link{capa.uv}}, \code{\link{capa.mv}}, \code{\link{pass}}, \code{\link{bard}}, and \code{\link{sampler}}.
The output displayed depends on the type of S4 object passed to the method. For all types, the output indicates whether the data is univariate or
multivariate, the number of observations in the data, and the type of change being detected.
}
\seealso{
\code{\link{capa}},\code{\link{capa.uv}},\code{\link{capa.mv}},\code{\link{pass}},\code{\link{bard}},\code{\link{sampler}}.
}
|
bbd1fe1ddef2a9f78325f4fbc5231da7b9e58382
|
5103964f10540aa2aa78df58cd943fcc14295d08
|
/analysis scripts/stratification_analysis.r
|
6d2744b257090f6e5479009764795c3cfe6aaa42
|
[
"MIT"
] |
permissive
|
javipus/mcrds_public
|
216917afc0f8afe348b7f0bff7f5fdea419c119f
|
43bb44cc8b6b79536e4b2afd0a7c724e90f137f7
|
refs/heads/master
| 2023-01-29T22:01:21.302921
| 2020-12-14T14:48:38
| 2020-12-14T14:48:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,109
|
r
|
stratification_analysis.r
|
# Stratification analysis of acute outcomes.
# First load libraries.
library(lme4)
library(lmerTest)
library(emmeans)
pacutes = read.csv("[PATH TO FILE]/pacutes.csv")
# To reproduce the stratification analysis, define the same model as before
# (acute_analysis.r) and add the guess + guess*condition terms (the PANAS is
# shown as an example but works the same for all other outcomes).
m = lmer(formula = value ~ (1|trial_id) + condition + guess + guess*condition + expectation + psychiatric_past, data=subset(pacutes, test_name=='PANAS'))
# Then compute the estimated marginal means to compare the 4 strata.
emm_fix_guess = emmeans(m, specs = pairwise ~ condition|guess) # strata with fixed guess
# BUG FIX: this result was originally assigned to `emm_fx_cond` but read back
# below as `emm_fix_cond`, which errored; the name is now consistent.
emm_fix_cond = emmeans(m, specs = pairwise ~ guess|condition) # strata with fixed condition
emm_fix_guess$contrasts # Comparison of strata with fixed guess (i.e. the two comparisons in the top row of fig5 - PL/PL vs MD/PL and PL/MD vs MD/MD)
emm_fix_cond$contrasts # Comparison of strata with fixed condition (i.e. the bottom two comparisons on fig 5 - PL/PL vs PL/MD and MD/PL vs MD/MD)
|
d20750ee0f2aaf0bfc9e55c40c914e0741743a39
|
6de90602b0d82a5e5b08f00a7305ba5f1e61ed33
|
/Main/K35/LoadSubjK35.R
|
db38564bfd180d9bbc995e13dd842d029cbb9710
|
[
"MIT"
] |
permissive
|
NeuroStat/PaperStudyCharCBMA
|
b2bb32af7d2088ce0c2aad84fa851d209434eeeb
|
67b140af59eb415964749bf2e2cf7f9fd4528f3d
|
refs/heads/master
| 2021-03-29T16:53:24.139381
| 2017-12-22T12:34:33
| 2017-12-22T12:34:33
| 90,959,743
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,974
|
r
|
LoadSubjK35.R
|
####################
#### TITLE: Load the data frames with information about the sampled subjects: sampling without replacement in studies.
#### Contents:
####
#### Source Files: //Meta\ Analyis/R\ Code/Studie_CBMA/PaperStudyCharCBMA.git/
#### First Modified: 12/05/2016
#### Notes:
#################
##
###############
### Notes
###############
##
# NOTE: ALL SUBJECT IDS ARE ANONYMIZED!!
# Sampling subjects has been done using the SamplingK35.R file.
# We use this file to write text files with the subject numbers
# to the StudySamSubjDouble.sh file.
# SETTING:
# The subjects in the smaller groups are sampled without replacement.
# This means they cannot end up twice in the studies.
# We will use this setting to calculate reliability of a meta-analysis.
##
###############
### Preparation
###############
##
# Take arguments from master file
args <- commandArgs(TRUE)
# Set working directory
wd <- as.character(args)[1]
setwd(wd)
# Which run are we in?
RUN <- as.numeric(as.character(args)[2])
# Location of the data frames
LOCFRAME <- as.character(args)[3]
##
###############
### Loading and writing files
###############
##
# Load the study sample sizes
load(paste(LOCFRAME, '/StudySamSubjK35.RData', sep=''))
# Select the subjects from this run
RunSubj <- StudySamSubj[StudySamSubj$run == RUN,]
# Number of studies
NS <- length(unique(StudySamSubj$study))
# For loop over the studies
for(s in 1:NS){
# Take the subjects
sampledData <- RunSubj[RunSubj$study == s,'subjects']
# Write them to txt file
cat(sampledData,file=paste(wd,"/Study_",s,"/study_",s,".txt",sep=""),sep='\n')
}
###############################
###############################
# Load the run reference subjects
load(paste(LOCFRAME, '/RefSamSubjK35.RData', sep=''))
# Select the subjects form this run
RunRefSubj <- RefSamSubj[RefSamSubj$run == RUN,'subjects']
# Write to txt file
cat(RunRefSubj,sep='\n',file=paste(wd,'/groupSubjects.txt',sep=''))
|
53fa26445186c5f5dc3979b6d56a7c1f813dbdc1
|
6eb6be10dfb00975aa041b19b47ef2511808096d
|
/ExData_Plotting1-master/plot4.R
|
b25f8dab11fbcee726e18cc50f72b85fca9598f7
|
[] |
no_license
|
yashika-sindhu/datasciencecoursera
|
5e72af030f83d7ba90433da32af1bc2940b50b54
|
971ceb4526935374250fa646d8722b7e94bb0ed6
|
refs/heads/master
| 2022-01-22T04:59:26.645366
| 2019-07-22T09:35:49
| 2019-07-22T09:35:49
| 116,703,715
| 0
| 1
| null | 2018-01-09T05:04:23
| 2018-01-08T16:57:01
| null |
UTF-8
|
R
| false
| false
| 2,342
|
r
|
plot4.R
|
## Download the dataset
download.file(
"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
destfile="Electric_Power_dataset.zip"
)
## Unzip the data
unzip("Electric_Power_dataset.zip")
## Read the relevant data into R
install.packages("sqldf")
library(sqldf)
my_data<-read.csv.sql(
"household_power_consumption.txt",
sql="select * from file where Date='1/2/2007' or Date='2/2/2007'",
sep=";"
)
## Convert Date and Time column to Date/Time class POSIXct
my_data$Date<-as.POSIXct(paste(as.Date(my_data$Date,"%d/%m/%Y"),my_data$Time))
my_data$Time<-NULL
## Convert the Datetime to numeric value to plot it on the graph
my_data$Date<-as.numeric(my_data$Date)
## PNG file is opened
png(filename="plot4.png",width=480,height=480)
## Plot4 is created with the multiple plots
par(mfrow=c(2,2),cex=0.70)
## Plot 1 of the 4 plots Days vs Active Power
plot(
my_data$Date,
my_data$Global_active_power,
type="n",
ylab="Global Active Power",
xlab="",
xaxt="n"
)
lines(my_data$Date,my_data$Global_active_power)
axis(
side=1,
at=c(1170268200,1170354660,1170441060),
labels=c("Thu","Fri","Sat")
)
## Plot 2 of the 4 Plots Days vs Voltage
plot(
my_data$Date,
my_data$Voltage,
type="n",
ylab="Voltage",
xlab="datetime",
xaxt="n"
)
lines(my_data$Date,my_data$Voltage)
axis(
side=1,
at=c(1170268200,1170354660,1170441060),
labels=c("Thu","Fri","Sat")
)
## Plot3 of the 4 plots Day vs Metering
plot(
my_data$Date,
my_data$Sub_metering_1,
type="n",
ylab="Energy sub metering",
xlab="",
xaxt="n"
)
axis(
side=1,
at=c(1170268200,1170354660,1170441060),
labels=c("Thu","Fri","Sat")
)
lines(my_data$Date,my_data$Sub_metering_1,col="black")
lines(my_data$Date,my_data$Sub_metering_2,col="red")
lines(my_data$Date,my_data$Sub_metering_3,col="blue")
legend(
"topright",
inset=0.015,
legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
col=c("black","red","blue"),
lty=1,
box.lty=0
)
## Plot 4 of the 4 plots Day vs Reactive Power
plot(
my_data$Date,
my_data$Global_reactive_power,
type="n",
ylab="Global_reactive_power",
xlab="datetime",
xaxt="n"
)
lines(my_data$Date,my_data$Global_reactive_power)
axis(
side=1,
at=c(1170268200,1170354660,1170441060),
labels=c("Thu","Fri","Sat")
)
## Close the PNG connection
dev.off()
|
bfa5c5bb6c72c1cdf938eb7f2bed5de1c87c1229
|
7547f30e8151d75850182ab76f6a76714f4bc90d
|
/Theoretischer Teil/Skript_zur_VL.R
|
3ca9cb354218578e09c0bc9bec0c6c1e2401d580
|
[] |
no_license
|
KerstinPierick/RWorkshop
|
80d292ed8faf3e9cd0abe64d76baaef409a35e4d
|
e8e378c24da238400d8df3b2b4ea968106c6c8fb
|
refs/heads/master
| 2022-07-21T13:11:58.471113
| 2022-07-13T16:43:13
| 2022-07-13T16:43:13
| 202,128,172
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,843
|
r
|
Skript_zur_VL.R
|
###################################################################
######## "Statistik und Programmieren mit R" ######################
###################################################################
########### Workshop von Kerstin Pierick ##########################
################### Campus 2019 ###################################
###################################################################
############### Skript zur Vorlesung ##############################
# 1. Grundrechenoperationen ---------------------------------------
1
1 + 2
2 - 1
3 * 3
10/2
5^2
# Logische Abfragen
5 == 5 # gleich
5 > 5 # größer als
5 >= 5 # größer gleich
5 != 5 # ungleich
# 2. Objekte und Zuordnung ----------------------------------------
a <- 5 # Der Operator "<-" weist a den Wert 5 zu
a
x = 5
x
# Jetzt kann mit dem Objekt a gerechnet werden
a * 2
# Regeln für Benennung von Objekten:
# - case sensitive
A
# - Nur Zahlen, Buchstaben, "." und "_"
# - darf nicht mit Zahl anfangen, muss Buchstaben enthalten
# Objekte werden überschrieben, wenn man sie erneut zuweist
a <- 6
a
# 3. Vektoren ---------------------------------------------------
b <- c(3, 4, 5, 6) # Die Funktion c() fügt die einzelnen Elemente zu einem Vektor zusammen
b
c <- 3:6 # Der Doppelpunkt kann mit "bis" übersetzt werden
c
# Überprüfen, ob b und c übereinstimmen
b == c
# Einzelne Elemente aufrufen
b[1]
b[1:3]
b[c(1, 4)]
b[b <= 3]
# Mit Vektoren rechnen
a * b
d <- a * b # Ergebnis als Objekt d speichern
d
b + c
# 4. Funktionen ---------------------------------------------------
log(a) # Logarithmus von a
sqrt(a) # Quadratwurzel von a
sqrt(b) # Bei Vektoren mit >1 Elementen: Funktion wird auf jedes Element angewendet
sum(b) # Summe
mean(b) # Mittelwert
sd(b) # Standardabweichung
length(b) # Länge des Vektors
cor(b, d) # Korrelationskoeffizient zweier Vektoren
# Eigene Funktion schreiben
# Für die Berechnung des Standardfehlers des Mittelwerts
se_of_mean <- function(x){
sd(x)/sqrt(length(x))
}
se_of_mean(b)
# Funktionen für einfache Plots
hist(b)
plot(b, d)
# 5. Pakete -----------------------------------------------------
# Beispiel-Datensatz von R
data(iris)
iris
# Installation
install.packages("dplyr") # Installiert das Paket dplyr (bei install.packages immer mit "")
# Paket laden
library(dplyr) # Stellt die Funktionen im Paket dplyr bereit (hier immer ohne "")
# Dokumentation des Pakets
help(package = dplyr)
# Eine Funktion aus dem Paket: select()
select(iris, Petal.Length)
# 6. Lineare Regression ------------------------------------------------------
# Modell formulieren: dist in Abhängigkeit von speed aus dem Datensatz cars
mod <- lm(dist ~ speed, data=cars)
# Erbebnis begutachten
summary(mod)
plot(cars$speed, cars$dist)
abline(mod)
|
10fcc07aacc43bd9efd6ca9bd72e4e1ff3dbebd7
|
547f84f7397b7fc0ac91b5e680c4554d6b5dff72
|
/rcpp-code/MultivarTV/man/predict.mvtv.Rd
|
b04e6a40921461c32faa313f3e722afc451254a3
|
[] |
no_license
|
brayano/MultivarTV
|
461a6992dfea8c4fe37d2fff1ad17b527d1607ce
|
89cf66a0cf7fa2e087574e9df9f688ea89f4defc
|
refs/heads/master
| 2021-03-24T12:42:13.330960
| 2018-04-30T19:13:52
| 2018-04-30T19:13:52
| 120,711,985
| 0
| 0
| null | 2018-04-30T19:13:53
| 2018-02-08T04:44:14
|
HTML
|
UTF-8
|
R
| false
| true
| 1,004
|
rd
|
predict.mvtv.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MultivarTV.R
\name{predict.mvtv}
\alias{predict.mvtv}
\title{MVTV Predict for Fitting Observed/New Data}
\usage{
\method{predict}{mvtv}(object, data = NULL, mesh = NULL, ...)
}
\arguments{
\item{object}{object produced by mvtv.default}
\item{data}{n by p matrix of inputs}
\item{mesh}{m by p mesh used by fitting function mvtv}
\item{...}{ignore}
}
\description{
Use fitted 'mvtv' object to predict new data.
}
\examples{
# Approximating Bivariate Fused Lasso for Uniform Data
## Generate Data
set.seed(117)
x <- matrix(runif(100),ncol = 2)
y <- matrix(runif(50),ncol=1)
m <- matrix(c(3,3))
## Find 5-fold validated MBS Model over range of lambdas
mbs_fold5 <- mvtv(x,y,m,folds=5,verbose=FALSE)
# Access fitted values of training data; equivalent to mbs_fold5$fitted
fitted.values <- predict(mbs_fold5)
newdata <- matrix( runif(50), ncol = 2) # Generate new data
newfits <- predict(mbs_fold5, newdata) # Fit new data
}
|
1f4f3ca1b74e9a896dae24ac8204d63d2776ce7c
|
c6076132c2740f2abbf3504eda9bfdd3c62a7969
|
/man/maximize_spline_metric.Rd
|
4836881626f742146931603fa0b94b2d303d6471
|
[] |
no_license
|
Thie1e/cutpointr
|
ae5866d8bd685bc5679352f8960c22ef99f3b93e
|
b84a39cc88bdeee788123c647d5cae50e5ee42e1
|
refs/heads/master
| 2022-04-30T01:28:35.752777
| 2022-04-13T17:32:19
| 2022-04-13T17:32:19
| 74,686,042
| 80
| 20
| null | 2022-01-18T11:46:51
| 2016-11-24T15:41:00
|
R
|
UTF-8
|
R
| false
| true
| 4,836
|
rd
|
maximize_spline_metric.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/optimize_metric.R
\name{maximize_spline_metric}
\alias{maximize_spline_metric}
\alias{minimize_spline_metric}
\title{Optimize a metric function in binary classification after spline smoothing}
\usage{
maximize_spline_metric(
data,
x,
class,
metric_func = youden,
pos_class = NULL,
neg_class = NULL,
direction,
w = NULL,
df = NULL,
spar = 1,
nknots = cutpoint_knots,
df_offset = NULL,
penalty = 1,
control_spar = list(),
tol_metric,
use_midpoints,
...
)
minimize_spline_metric(
data,
x,
class,
metric_func = youden,
pos_class = NULL,
neg_class = NULL,
direction,
w = NULL,
df = NULL,
spar = 1,
nknots = cutpoint_knots,
df_offset = NULL,
penalty = 1,
control_spar = list(),
tol_metric,
use_midpoints,
...
)
}
\arguments{
\item{data}{A data frame or tibble in which the columns that are given in x
and class can be found.}
\item{x}{(character) The variable name to be used for classification,
e.g. predictions or test values.}
\item{class}{(character) The variable name indicating class membership.}
\item{metric_func}{(function) A function that computes a
metric to be optimized. See description.}
\item{pos_class}{The value of class that indicates the positive class.}
\item{neg_class}{The value of class that indicates the negative class.}
\item{direction}{(character) Use ">=" or "<=" to select whether an x value
>= or <= the cutoff predicts the positive class.}
\item{w}{Optional vector of weights of the same length as x; defaults to all 1.}
\item{df}{The desired equivalent number of degrees of freedom
(trace of the smoother matrix). Must be in (1,nx], nx the number of
unique x values.}
\item{spar}{Smoothing parameter, typically (but not necessarily) in (0,1].
When spar is specified, the coefficient lambda of the integral of the squared
second derivative in the fit (penalized log likelihood) criterion is a
monotone function of spar.}
\item{nknots}{Integer or function giving the number of knots. The function
should accept data and x (the name of the predictor variable) as inputs.
By default nknots = 0.1 * log(n_dat / n_cut) * n_cut where n_dat is the
number of observations and n_cut the number of unique predictor values.}
\item{df_offset}{Allows the degrees of freedom to be increased by df_offset
in the GCV criterion.}
\item{penalty}{The coefficient of the penalty for degrees of freedom in the
GCV criterion.}
\item{control_spar}{Optional list with named components controlling the root
finding when the smoothing parameter spar is computed, i.e., NULL. See
help("smooth.spline") for further information.}
\item{tol_metric}{All cutpoints will be returned that lead to a metric
value in the interval [m_max - tol_metric, m_max + tol_metric] where
m_max is the maximum achievable metric value. This can be used to return
multiple decent cutpoints and to avoid floating-point problems.}
\item{use_midpoints}{(logical) If TRUE (default FALSE) the returned optimal
cutpoint will be the mean of the optimal cutpoint and the next highest
observation (for direction = ">") or the next lowest observation
(for direction = "<") which avoids biasing the optimal cutpoint.}
\item{...}{Further arguments that will be passed to metric_func.}
}
\value{
A tibble with the columns \code{optimal_cutpoint}, the corresponding metric
value and \code{roc_curve}, a nested tibble that includes all possible cutoffs
and the corresponding numbers of true and false positives / negatives and
all corresponding metric values.
}
\description{
Given a function for computing a metric in \code{metric_func}, this function
smoothes the function of metric value per cutpoint using smoothing splines. Then it
optimizes the metric by selecting an optimal cutpoint. For further details
on the smoothing spline see \code{?stats::smooth.spline}.
The \code{metric} function should accept the following inputs:
\itemize{
\item \code{tp}: vector of number of true positives
\item \code{fp}: vector of number of false positives
\item \code{tn}: vector of number of true negatives
\item \code{fn}: vector of number of false negatives
}
}
\details{
The above inputs are arrived at by using all unique values in \code{x}, Inf, and
-Inf as possible cutpoints for classifying the variable in class.
}
\examples{
oc <- cutpointr(suicide, dsi, suicide, gender, method = maximize_spline_metric,
df = 5, metric = accuracy)
plot_metric(oc)
}
\seealso{
Other method functions:
\code{\link{maximize_boot_metric}()},
\code{\link{maximize_gam_metric}()},
\code{\link{maximize_loess_metric}()},
\code{\link{maximize_metric}()},
\code{\link{oc_manual}()},
\code{\link{oc_mean}()},
\code{\link{oc_median}()},
\code{\link{oc_youden_kernel}()},
\code{\link{oc_youden_normal}()}
}
\concept{method functions}
|
2c441c3c3a94802bc35332b8d836fc0516254e7a
|
e68e99f52f3869c60d6488f0492905af4165aa64
|
/man/torch_linspace.Rd
|
e6031ffc522023a5b62df5142454e64f432d276a
|
[
"MIT"
] |
permissive
|
mlverse/torch
|
a6a47e1defe44b9c041bc66504125ad6ee9c6db3
|
f957d601c0295d31df96f8be7732b95917371acd
|
refs/heads/main
| 2023-09-01T00:06:13.550381
| 2023-08-30T17:44:46
| 2023-08-30T17:44:46
| 232,347,878
| 448
| 86
|
NOASSERTION
| 2023-09-11T15:22:22
| 2020-01-07T14:56:32
|
C++
|
UTF-8
|
R
| false
| true
| 1,879
|
rd
|
torch_linspace.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/creation-ops.R, R/gen-namespace-docs.R,
% R/gen-namespace-examples.R
\name{torch_linspace}
\alias{torch_linspace}
\title{Linspace}
\usage{
torch_linspace(
start,
end,
steps = 100,
dtype = NULL,
layout = NULL,
device = NULL,
requires_grad = FALSE
)
}
\arguments{
\item{start}{(float) the starting value for the set of points}
\item{end}{(float) the ending value for the set of points}
\item{steps}{(int) number of points to sample between \code{start} and \code{end}. Default: \code{100}.}
\item{dtype}{(\code{torch.dtype}, optional) the desired data type of returned tensor. Default: if \code{NULL}, uses a global default (see \code{torch_set_default_tensor_type}).}
\item{layout}{(\code{torch.layout}, optional) the desired layout of returned Tensor. Default: \code{torch_strided}.}
\item{device}{(\code{torch.device}, optional) the desired device of returned tensor. Default: if \code{NULL}, uses the current device for the default tensor type (see \code{torch_set_default_tensor_type}). \code{device} will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types.}
\item{requires_grad}{(bool, optional) If autograd should record operations on the returned tensor. Default: \code{FALSE}.}
}
\description{
Linspace
}
\section{linspace(start, end, steps=100, out=NULL, dtype=NULL, layout=torch.strided, device=NULL, requires_grad=False) -> Tensor }{
Returns a one-dimensional tensor of \code{steps}
equally spaced points between \code{start} and \code{end}.
The output tensor is 1-D of size \code{steps}.
}
\examples{
if (torch_is_installed()) {
torch_linspace(3, 10, steps=5)
torch_linspace(-10, 10, steps=5)
torch_linspace(start=-10, end=10, steps=5)
torch_linspace(start=-10, end=10, steps=1)
}
}
|
1e1bf82368a75d60279e120f954af1364d46fb73
|
5a9fad5bf2b3f91ee6802d342546408217eeff14
|
/R/prepare_dictionary_ngram.R
|
8a8968820992e6499716a25a64a6821c8f403554
|
[] |
no_license
|
phileas-condemine/bodily_injury_atp
|
c02c6dae8046fa26b0fca814567522dce08817fb
|
8bb594e5e757cedea080f3da476c9e8a1ad682a6
|
refs/heads/master
| 2021-05-07T06:05:29.691425
| 2017-12-12T09:22:36
| 2017-12-12T09:22:36
| 111,701,372
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 810
|
r
|
prepare_dictionary_ngram.R
|
load("Documents/bodily_injury_atp/data/CAPP_1ST_2ND_INSTANCES/CAPP_text_extraction.RData")
library(text2vec)
library(magrittr)
pattern="corporel"
ngram=3L
n_cores=28
token <- CAPP_docs %>%word_tokenizer
itokenized <- text2vec::itoken(token,ids = 1:length(CAPP_docs), progressbar = FALSE)
dictionary <- create_vocabulary(itokenized,stopwords = c(tm::stopwords(kind = 'fr')), ngram = c(ngram_min=1L,ngram_max=ngram))
dictionary.pruned <- prune_vocabulary(dictionary,
term_count_min = 100,
doc_proportion_max = 0.5)
vectorizer<-vocab_vectorizer(dictionary.pruned)
matrix_ngrams<-text2vec::create_dtm(itokenized,vectorizer)
save(list="matrix_ngrams",file="Documents/bodily_injury_atp/data/CAPP_1ST_2ND_INSTANCES/CAPP_ngrams.RData")
|
640629aa8a5b548a55f7b5622bae48179941f5ff
|
2e280dbf7411ea0c1b485e2587fd2b94c0be875b
|
/tp.r
|
32a891252ec356ec5e5ab11b305a1d2d04c639bd
|
[] |
no_license
|
badbayard/tp_R
|
49229cb05a2a3cf6e6c6459df991e9a7e5a9d037
|
3a720b43031a344f0af5d55979b1f9efd504cf43
|
refs/heads/master
| 2020-03-22T05:09:13.311432
| 2018-07-03T07:37:26
| 2018-07-03T07:37:26
| 139,545,849
| 0
| 0
| null | null | null | null |
IBM852
|
R
| false
| false
| 211
|
r
|
tp.r
|
exo 1 ša marche c'est cool :)
exo 2 x<-c(0,7,8)
y<-c(5,6,x[2],x[3],10,11,12,0,x[2],x[3])
y[3] y[5] y[8] y[9]
y[y[]<=8]
[1] 5 6 7 8 0 7 8
> y[-2]
[1] 5 7 8 10 11 12 0 7 8
|
de1ef17832dab5b2514a24f7d1b42bd1fd051653
|
902037115141ead7b315e7b63e437ec61c01c2c1
|
/R/ia.samp.R
|
323f5476e2208b7c92e260c7601bd1e8dc97680d
|
[] |
no_license
|
cran/scrime
|
4bdc7e989ba9e648d004ca47cd2d10bb5e78a717
|
cf0033dbfe2a6fa807593a460ef4bcb0931db96a
|
refs/heads/master
| 2021-06-02T21:50:17.706604
| 2018-12-01T10:00:03
| 2018-12-01T10:00:03
| 17,699,500
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 169
|
r
|
ia.samp.R
|
`ia.samp` <-
function(n.pair,conj=0){
mat<-matrix(0,2^n.pair,n.pair)
for(i in 1:n.pair)
mat[,i]<-rep(rep(c(1,conj),e=2^(n.pair-i)),2^(i-1))
mat
}
|
bb49a2a14e004d8bfa057e5376f2a77cf13a4d9a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pathological/tests/test_decompose_path.R
|
3402cd54bc13fa5386717559b9bb4a57ce05b7b3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,375
|
r
|
test_decompose_path.R
|
create_expected_decomposed_path <- function(dirname, filename, extension, row.names)
{
structure(
data.frame(
dirname = dirname,
filename = filename,
extension = extension,
row.names = row.names,
stringsAsFactors = FALSE
),
class = c("decomposed_path", "data.frame")
)
}
test_that(
"decompose_path works with a zero length input",
{
x <- character()
x2 <- NULL
expected <- create_expected_decomposed_path(
dirname = character(),
filename = character(),
extension = character(),
row.names = character()
)
actual <- decompose_path(x)
expect_s3_class(actual, "decomposed_path")
expect_equal(actual$dirname, expected$dirname)
expect_equal(actual$filename, expected$filename)
expect_equal(actual$extension, expected$extension)
expect_equal(rownames(actual), rownames(expected))
actual2 <- decompose_path(x2)
expect_s3_class(actual2, "decomposed_path")
expect_equal(actual2$dirname, expected$dirname)
expect_equal(actual2$filename, expected$filename)
expect_equal(actual2$extension, expected$extension)
expect_equal(rownames(actual2), rownames(expected))
}
)
test_that(
"decompose_path handles paths with no directory and a single extension in the filename.",
{
skip_on_cran()
x <- "foo.tgz"
pwd <- std_getwd()
expected <- create_expected_decomposed_path(
dirname = pwd,
filename = "foo",
extension = "tgz",
row.names = x
)
actual <- decompose_path(x)
expect_s3_class(actual, "decomposed_path")
expect_equal(actual$dirname, expected$dirname)
expect_equal(actual$filename, expected$filename)
expect_equal(actual$extension, expected$extension)
expect_equal(rownames(actual), rownames(expected))
}
)
test_that(
"decompose_path handles paths with a directory and a single extension in the filename.",
{
x <- "somedir/foo.tgz"
pwd <- std_getwd()
expected <- create_expected_decomposed_path(
dirname = file.path(pwd, "somedir"),
filename = "foo",
extension = "tgz",
row.names = x
)
actual <- decompose_path(x)
expect_s3_class(actual, "decomposed_path")
expect_equal(actual$dirname, expected$dirname)
expect_equal(actual$filename, expected$filename)
expect_equal(actual$extension, expected$extension)
expect_equal(rownames(actual), rownames(expected))
}
)
test_that(
"decompose_path handles paths with no directory and a double extension in the filename.",
{
skip_on_cran()
x <- "foo.tar.gz"
pwd <- std_getwd()
expected <- create_expected_decomposed_path(
dirname = pwd,
filename = "foo",
extension = "tar.gz",
row.names = x
)
actual <- decompose_path(x)
expect_s3_class(actual, "decomposed_path")
expect_equal(actual$dirname, expected$dirname)
expect_equal(actual$filename, expected$filename)
expect_equal(actual$extension, expected$extension)
expect_equal(rownames(actual), rownames(expected))
}
)
test_that(
"decompose_path handles paths with a directory and a double extension in the filename.",
{
x <- "somedir/foo.tar.gz"
pwd <- std_getwd()
expected <- create_expected_decomposed_path(
dirname = file.path(pwd, "somedir"),
filename = "foo",
extension = "tar.gz",
row.names = x
)
actual <- decompose_path(x)
expect_s3_class(actual, "decomposed_path")
expect_equal(actual$dirname, expected$dirname)
expect_equal(actual$filename, expected$filename)
expect_equal(actual$extension, expected$extension)
expect_equal(rownames(actual), rownames(expected))
}
)
test_that(
"decompose_path handles paths with no directory and no extension in the filename.",
{
skip_on_cran()
x <- "foo"
pwd <- std_getwd()
expected <- create_expected_decomposed_path(
dirname = pwd,
filename = "foo",
extension = "",
row.names = x
)
actual <- decompose_path(x)
expect_s3_class(actual, "decomposed_path")
expect_equal(actual$dirname, expected$dirname)
expect_equal(actual$filename, expected$filename)
expect_equal(actual$extension, expected$extension)
expect_equal(rownames(actual), rownames(expected))
}
)
test_that(
"decompose_path handles paths with a directory and no extension in the filename.",
{
x <- "somedir/foo"
pwd <- std_getwd()
expected <- create_expected_decomposed_path(
dirname = file.path(pwd, "somedir"),
filename = "foo",
extension = "",
row.names = x
)
actual <- decompose_path(x)
expect_s3_class(actual, "decomposed_path")
expect_equal(actual$dirname, expected$dirname)
expect_equal(actual$filename, expected$filename)
expect_equal(actual$extension, expected$extension)
expect_equal(rownames(actual), rownames(expected))
}
)
test_that(
"decompose_path handles filenames containing a '.' and an extension.",
{
skip_on_cran()
x <- "foo. bar.zip"
pwd <- std_getwd()
expected <- create_expected_decomposed_path(
dirname = pwd,
filename = "foo. bar",
extension = "zip",
row.names = x
)
actual <- decompose_path(x)
expect_s3_class(actual, "decomposed_path")
expect_equal(actual$dirname, expected$dirname)
expect_equal(actual$filename, expected$filename)
expect_equal(actual$extension, expected$extension)
expect_equal(rownames(actual), rownames(expected))
}
)
test_that(
"decompose_path handles backslashes in the directory name.",
{
x <- "somedir\\foo.tgz"
pwd <- std_getwd()
expected <- create_expected_decomposed_path(
dirname = file.path(pwd, "somedir"),
filename = "foo",
extension = "tgz",
row.names = x
)
actual <- decompose_path(x)
expect_s3_class(actual, "decomposed_path")
expect_equal(actual$dirname, expected$dirname)
expect_equal(actual$filename, expected$filename)
expect_equal(actual$extension, expected$extension)
expect_equal(rownames(actual), rownames(expected))
}
)
test_that(
"decompose_path handles mixed forward and backslashes in the directory name.",
{
x <- "somedir\\another dir/foo.tgz"
pwd <- std_getwd()
expected <- create_expected_decomposed_path(
dirname = file.path(pwd, "somedir", "another dir"),
filename = "foo",
extension = "tgz",
row.names = x
)
actual <- decompose_path(x)
expect_s3_class(actual, "decomposed_path")
expect_equal(actual$dirname, expected$dirname)
expect_equal(actual$filename, expected$filename)
expect_equal(actual$extension, expected$extension)
expect_equal(rownames(actual), rownames(expected))
}
)
test_that(
"decompose_path handles absolute paths to directories.",
{
x <- R.home()
expected_dir <- normalizePath(R.home(), "/", mustWork = FALSE)
substring(expected_dir, 1, 1) <- toupper(substring(expected_dir, 1, 1))
expected <- create_expected_decomposed_path(
dirname = expected_dir,
filename = "",
extension = "",
row.names = x
)
actual <- decompose_path(x)
expect_s3_class(actual, "decomposed_path")
expect_equal(actual$dirname, expected$dirname)
expect_equal(actual$filename, expected$filename)
expect_equal(actual$extension, expected$extension)
expect_equal(rownames(actual), rownames(expected))
}
)
test_that(
"decompose_path handles '~'.",
{
x <- "~"
pwd <- std_getwd()
expected <- create_expected_decomposed_path(
dirname = normalizePath("~", "/", mustWork = FALSE),
filename = "",
extension = "",
row.names = x
)
actual <- decompose_path(x)
expect_s3_class(actual, "decomposed_path")
expect_equal(actual$dirname, expected$dirname)
expect_equal(actual$filename, expected$filename)
expect_equal(actual$extension, expected$extension)
expect_equal(rownames(actual), rownames(expected))
}
)
test_that(
"decompose_path handles files inside '~'.",
{
x <- "~/foo.tgz"
expected <- create_expected_decomposed_path(
dirname = normalizePath(dirname(x), "/", mustWork = FALSE),
filename = "foo",
extension = "tgz",
row.names = x
)
actual <- decompose_path(x)
expect_s3_class(actual, "decomposed_path")
expect_equal(actual$dirname, expected$dirname)
expect_equal(actual$filename, expected$filename)
expect_equal(actual$extension, expected$extension)
expect_equal(rownames(actual), rownames(expected))
}
)
test_that(
"decompose_path handles the current directory as '.'.",
{
skip_on_cran()
x <- "."
pwd <- std_getwd()
expected <- create_expected_decomposed_path(
dirname = pwd,
filename = "",
extension = "",
row.names = x
)
actual <- decompose_path(x)
expect_s3_class(actual, "decomposed_path")
expect_equal(actual$dirname, expected$dirname)
expect_equal(actual$filename, expected$filename)
expect_equal(actual$extension, expected$extension)
expect_equal(rownames(actual), rownames(expected))
}
)
test_that(
"decompose_path handles the parent directory as '..'.",
{
skip_on_cran()
x <- ".."
pwd <- std_getwd()
expected <- create_expected_decomposed_path(
dirname = dirname(pwd),
filename = "",
extension = "",
row.names = x
)
actual <- decompose_path(x)
expect_s3_class(actual, "decomposed_path")
expect_equal(actual$dirname, expected$dirname)
expect_equal(actual$filename, expected$filename)
expect_equal(actual$extension, expected$extension)
expect_equal(rownames(actual), rownames(expected))
}
)
test_that(
"decompose_path handles files inside '.'.",
{
skip_on_cran()
x <- "./foo.tgz"
pwd <- std_getwd()
expected <- create_expected_decomposed_path(
dirname = pwd,
filename = "foo",
extension = "tgz",
row.names = x
)
actual <- decompose_path(x)
expect_s3_class(actual, "decomposed_path")
expect_equal(actual$dirname, expected$dirname)
expect_equal(actual$filename, expected$filename)
expect_equal(actual$extension, expected$extension)
expect_equal(rownames(actual), rownames(expected))
}
)
test_that(
"decompose_path handles empty strings.",
{
x <- ""
expected <- create_expected_decomposed_path(
dirname = "",
filename = "",
extension = "",
row.names = x
)
actual <- decompose_path(x)
expect_s3_class(actual, "decomposed_path")
expect_equal(actual$dirname, expected$dirname)
expect_equal(actual$filename, expected$filename)
expect_equal(actual$extension, expected$extension)
expect_equal(rownames(actual), rownames(expected))
}
)
test_that(
"decompose_path handles missing paths.",
{
x <- NA
expected <- create_expected_decomposed_path(
dirname = NA_character_,
filename = NA_character_,
extension = NA_character_,
row.names = "<NA>"
)
expect_warning(
actual <- decompose_path(x),
"Coercing .+ to class .character.\\."
)
actual <- decompose_path(x)
expect_s3_class(actual, "decomposed_path")
expect_equal(actual$dirname, expected$dirname)
expect_equal(actual$filename, expected$filename)
expect_equal(actual$extension, expected$extension)
expect_equal(rownames(actual), rownames(expected))
}
)
# Shared fixture: relative cat-themed paths exercising forward slashes,
# backslashes (note: "\\" in source is one backslash at runtime),
# embedded spaces, and one bare file name with no directory part.
catz <- c(
  "catz/lolcat.gif",
  "moar cats/nyan cat.jpeg",
  "catz\\catz in loft\\ceiling cat.jpg",
  "catz/musical catz\\keyboard cat.bmp",
  "catbread.png",
  "kitties\\bonsai kitten.tiff",
  "kitties\\hipster kitty.pdf"
)
pwd <- std_getwd()
# Expected decomposition of `catz`: each relative dirname resolves against
# the current working directory, and the original input strings become
# the row names of the result.
expected_catz <- create_expected_decomposed_path(
  dirname = c(
    file.path(pwd, "catz"),
    file.path(pwd, "moar cats"),
    file.path(pwd, "catz/catz in loft"),
    file.path(pwd, "catz/musical catz"),
    pwd,
    file.path(pwd, "kitties"),
    file.path(pwd, "kitties")
  ),
  filename = c(
    "lolcat", "nyan cat", "ceiling cat", "keyboard cat",
    "catbread", "bonsai kitten", "hipster kitty"
  ),
  extension = c(
    "gif", "jpeg", "jpg", "bmp", "png", "tiff", "pdf"
  ),
  row.names = catz
)
test_that(
  "decompose_path works with a character vector input.",
  {
    skip_on_cran()
    # Reuse the shared `catz` fixture and its expected decomposition.
    got <- decompose_path(catz)
    expect_s3_class(got, "decomposed_path")
    expect_equal(got$dirname, expected_catz$dirname)
    expect_equal(got$filename, expected_catz$filename)
    expect_equal(got$extension, expected_catz$extension)
    expect_equal(rownames(got), rownames(expected_catz))
  }
)
test_that(
  "decompose_path works with a factor input.",
  {
    skip_on_cran()
    # Factor input is coerced to character, which should emit a warning.
    expect_warning(
      got <- decompose_path(factor(catz)),
      "Coercing .+ to class .character.\\."
    )
    expect_s3_class(got, "decomposed_path")
    expect_equal(got$dirname, expected_catz$dirname)
    expect_equal(got$filename, expected_catz$filename)
    expect_equal(got$extension, expected_catz$extension)
    expect_equal(rownames(got), rownames(expected_catz))
  }
)
test_that(
  "decompose_path handles paths with a unicode directory name.",
  {
    # Directory name made of non-ASCII (Latin Extended) characters.
    unicode_dir <- "\u0108\u0158\u0104\u0143"
    path <- file.path(unicode_dir, "foo.tgz")
    want <- create_expected_decomposed_path(
      dirname = file.path(std_getwd(), unicode_dir),
      filename = "foo",
      extension = "tgz",
      row.names = path
    )
    got <- decompose_path(path)
    expect_s3_class(got, "decomposed_path")
    expect_equal(got$dirname, want$dirname)
    expect_equal(got$filename, want$filename)
    expect_equal(got$extension, want$extension)
    expect_equal(rownames(got), rownames(want))
  }
)
test_that(
  "decompose_path handles UNC paths with forward slashes.",
  {
    unc <- "//foo/bar"
    # On Windows the UNC host prefix is normalized to backslashes.
    host <- if (is_windows()) "\\\\foo" else "/foo"
    want <- create_expected_decomposed_path(
      dirname = host,
      filename = "bar",
      extension = "",
      row.names = unc
    )
    got <- decompose_path(unc)
    expect_s3_class(got, "decomposed_path")
    expect_equal(got$dirname, want$dirname)
    expect_equal(got$filename, want$filename)
    expect_equal(got$extension, want$extension)
    expect_equal(rownames(got), rownames(want))
  }
)
test_that(
  "decompose_path handles UNC paths with backslashes.",
  {
    # Mixed separators: UNC host written with backslashes, then a slash.
    unc <- "\\\\foo/bar"
    want <- create_expected_decomposed_path(
      dirname = "\\\\foo",
      filename = "bar",
      extension = "",
      row.names = unc
    )
    got <- decompose_path(unc)
    expect_s3_class(got, "decomposed_path")
    expect_equal(got$dirname, want$dirname)
    expect_equal(got$filename, want$filename)
    expect_equal(got$extension, want$extension)
    expect_equal(rownames(got), rownames(want))
  }
)
test_that(
  "decompose_path works with NTFS Junctions",
  {
    # Windows-only: NTFS junctions are directory reparse points that
    # should decompose like the directory they point through.
    skip_if_not(assertive.reflection::is_windows())
    skip_on_cran()
    source_dir <- tempfile("source")
    target_dir <- tempfile("target")
    create_dirs(target_dir)
    # Junction source_dir -> target_dir (helper defined elsewhere in pkg).
    create_ntfs_junction(source_dir, target_dir)
    # Decompose both the junction itself and a file path beneath it.
    x <- c(source_dir, file.path(source_dir, "foo.bar"))
    actual <- decompose_path(x)
    expected <- create_expected_decomposed_path(
      dirname = rep.int(standardize_path(source_dir), 2),
      filename = c("", "foo"),
      extension = c("", "bar"),
      row.names = x
    )
    expect_s3_class(actual, "decomposed_path")
    expect_equal(actual$dirname, expected$dirname)
    expect_equal(actual$filename, expected$filename)
    expect_equal(actual$extension, expected$extension)
    expect_equal(rownames(actual), rownames(expected))
  }
)
|
8fe19749c9631d1ee07dd783a07c9acd461168d1
|
9b50e27c9b97e4693a2a98040157f74f8c7c6525
|
/man/rotate.somites.Rd
|
046c17707faccc0b45328fff7520b98ac8c7f662
|
[] |
no_license
|
erinboyleanderson/CellTrackingEBA
|
ece73ea8930478ecb4dd3085889f518ad5c26f9b
|
f8640cf4fbf8c3f2849b379e02c4be9d88ed7a28
|
refs/heads/master
| 2020-03-21T16:40:50.732377
| 2018-12-11T21:30:50
| 2018-12-11T21:30:50
| 138,785,925
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 998
|
rd
|
rotate.somites.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rotate.somites.R
\name{rotate.somites}
\alias{rotate.somites}
\title{Function for rotating the somites}
\usage{
rotate.somites(somiteDF, bros, side)
}
\arguments{
\item{somiteDF}{Dataframe containing the boundary information. Must include the following column names: Embryo, X, Y, boundary. Embryo is the number of the embryo; boundary is the somite boundary and must be in the form 0-1, 1-2, etc., where the numbers identify the two somites being bounded (i.e. 0-1 is the boundary between somite 0 and somite 1). The dataframe MUST minimally include boundaries 0-1 and 5-6!}
\item{bros}{number of embryos in the dataframe}
\item{side}{dataframe consisting of the number of the embryo, the side (L vs R) and a multiplier (-1 or 1) based on if the embryo is on the L or R side}
}
\description{
This function rotates the somites so that they are aligned correctly. This alignment is important later, when cells are labelled with their somite identity.
}
|
3f20e0d13c7a28122e1e3a354de0594fe1c27c61
|
0df826d83af76bec2e82c823beeca216057dac38
|
/R/langmuirTrans.R
|
d981c434fa994e84a2ef07a74cbcf300bde31784
|
[] |
no_license
|
hdraisma/quantroSim
|
07caebf668a7973c0d8d5c95563fb87516da82c2
|
ef4720f1c1bb41dccad5deddbc876c157e3e0bb2
|
refs/heads/master
| 2020-08-01T17:54:54.215757
| 2018-08-22T13:26:15
| 2018-08-22T13:26:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 600
|
r
|
langmuirTrans.R
|
#' @title Langmuir adsorption transformation
#'
#' @description To model DNA methylation and gene expression from arrays,
#' this function represents the saturation reached in the arrays via the
#' Langmuir isotherm: d + a * x / (x + b).
#'
#' @param x expected number of methylated molecules after PCR and
#' bisulfite-sequencing
#' @param a intensity from scanner (asymptotic maximum signal)
#' @param b scale parameter (half-saturation constant)
#' @param d background noise (signal at x = 0)
#'
#' @return A numeric vector the same length as \code{x}.
#'
#' @author Stephanie Hicks
#' @export
langmuirTrans <- function(x, a, b, d){
  # Recycle scalar parameters to the length of x.  The original code
  # called rep() without assigning the result, so those lines were
  # no-ops; ordinary arithmetic recycling happened to mask the bug.
  if (length(a) == 1) a <- rep(a, length(x))
  if (length(b) == 1) b <- rep(b, length(x))
  if (length(d) == 1) d <- rep(d, length(x))
  d + a * (x / (x + b))
}
|
b3bc92d3740aa015fab4211edb444a057988efd6
|
9bd88feb5cd6ab8bc54a443c2f0037cf09c0c299
|
/analysis/_fl/xx_twitter_follower_summary.R
|
e995820c6c28300e29ec4c660494c533be4b172c
|
[] |
no_license
|
gmaubach/of-dollars-and-data
|
9fc404f391f4cf3b840e76ca79660cb62873780a
|
b1b50c3aa132e6b8e4a045c5c136c08760d587f2
|
refs/heads/master
| 2023-07-09T04:17:02.890021
| 2021-08-18T01:18:35
| 2021-08-18T01:18:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,553
|
r
|
xx_twitter_follower_summary.R
|
# Pull the follower list for one or more Twitter handles via rtweet and
# export user-level profile details to an Excel file.
# NOTE(review): this script clears the workspace and changes the working
# directory at the top -- only safe to run standalone, never source()d
# from other code.
cat("\014") # Clear your console
rm(list = ls()) #clear your environment
########################## Load in header file ######################## #
setwd("~/git/of_dollars_and_data")
# header.R presumably defines `exportdir` and `export_to_excel()` -- TODO confirm
source(file.path(paste0(getwd(),"/header.R")))
########################## Load in Libraries ########################## #
library(rtweet)
library(httpuv)
library(tidyverse)
folder_name <- "/_fl/xx_twitter_follower_summary"
out_path <- paste0(exportdir, folder_name)
dir.create(file.path(paste0(out_path)), showWarnings = FALSE)
########################## Start Program Here ######################### #
# My app name
appname <- "TweetScraper22"
# SECURITY(review): API credentials are hard-coded below.  They should be
# treated as compromised, rotated, and loaded from environment variables
# (e.g. Sys.getenv("TWITTER_KEY")) rather than committed to source control.
# API key
key <- 'TMRoCTZ2Eis1O7ELovhs8ni9X'
# API secret
secret <- 'trdbfmbVNG5BOiJyvX4pUDmtccN0KV5Y5AWNcz2zKUbzhwus27'
# Login token
twitter_token <- create_token(
  app = appname,
  consumer_key = key,
  consumer_secret = secret)
# List of handles
handles <- c("dollarsanddata")
# Dummy to pull data if needed
pull_data <- 1
if (pull_data == 1){
  # BUG(review): `followers` is reassigned on every iteration, so with
  # more than one handle only the LAST handle's followers survive the
  # loop; results should be accumulated (e.g. bind_rows) instead.
  for (i in 1:length(handles)){
    user <- handles[i]
    print(user)
    followers <- get_followers(user, n = 1.5*10^6, retryonratelimit = TRUE)
    followers$handle <- user
    followers$date <- Sys.Date()
  }
  # Look up profile details for the collected follower ids.
  user_data <- lookup_users(followers$user_id) %>%
    select(user_id, screen_name, name, location, description)
}
user_final <- user_data
export_to_excel(user_final, outfile = paste0(out_path, "/", handles[1], "_twitter_data.xlsx"), sheetname = "twtr", 1, 0)
# ############################ End ################################## #
|
42015c61d0903f447a143cae1fdedf7049b730eb
|
ba87a73a22600087a4bb8ea21b15b391c98c9579
|
/Scripts/ROC.R
|
c3a894b0ce6f6a7056e00689b2be5d4a656b7fbc
|
[] |
no_license
|
rajkorde/RTestCode
|
d32b5f87b122b06d5cb7ce71ec4b4823a02669a4
|
4b07937128acd3e73bb6489d341e859df737ff6b
|
refs/heads/master
| 2021-01-16T23:57:45.330753
| 2019-01-31T05:26:20
| 2019-01-31T05:26:20
| 58,427,571
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,665
|
r
|
ROC.R
|
# Build a simple ROC curve by hand: rank cases by decreasing score, then
# accumulate true-positive and false-positive rates down the ranking.
# Returns a data frame with columns TPR, FPR and the reordered labels.
simple_roc <- function(labels, scores) {
  ranked <- labels[order(scores, decreasing = TRUE)]
  n_pos <- sum(ranked)
  n_neg <- sum(!ranked)
  data.frame(
    TPR = cumsum(ranked) / n_pos,
    FPR = cumsum(!ranked) / n_neg,
    labels = ranked
  )
}
set.seed(1)
# Simulate N widgets: predictor x uniform on [0, 100], noisy linear
# response y = 122 - x/2 + N(0, noise), and a factor flagging widgets
# with y > 100 as "bad".
sim_widget_data <- function(N, noise = 100) {
  x <- runif(N, 0, 100)
  y <- 122 - x / 2 + rnorm(N, sd = noise)
  data.frame(x, y, bad_widget = factor(y > 100))
}
# Simulate widget data, fit a logistic regression, and compare the ROC
# curve from pROC with the hand-rolled simple_roc() above.
widget_data <- sim_widget_data(500, 10)
# Hold out a random quarter of the rows as a test set.
test_set_idx <- sample(1:nrow(widget_data), size=floor(nrow(widget_data)/4))
test_set <- widget_data[test_set_idx,]
training_set <- widget_data[-test_set_idx,]
library(ggplot2)
library(dplyr)
ggplot(test_set, aes(x, y, col = bad_widget)) +
  scale_color_manual(values = c("black", "red")) +
  geom_point() +
  ggtitle("Bad widgets related to x")
# Fit a logistic regression of bad_widget on x, then score the held-out
# test set on both the link (log-odds) and response (probability) scales.
fit_glm <- glm(bad_widget ~ x, training_set, family=binomial(link="logit"))
glm_link_scores <- predict(fit_glm, test_set, type="link")
glm_response_scores <- predict(fit_glm, test_set, type="response")
score_data <- data.frame(link = glm_link_scores,
                         response = glm_response_scores,
                         bad_widget = test_set$bad_widget,
                         stringsAsFactors = FALSE)
# Link and response scores are monotone transforms of each other, so
# they rank the test cases identically (and give the same ROC curve).
score_data %>%
  ggplot(aes(x = link, y = response, col = bad_widget)) +
  scale_color_manual(values = c("black", "red")) +
  geom_point() +
  geom_rug() +
  ggtitle("Both link and response scores put cases in the same order")
library(pROC)
# Reference ROC curve from pROC for comparison with simple_roc().
r <- roc(test_set$bad_widget, glm_response_scores, direction = "<")
plot(r, col = "yellow", lwd = 3, main = "ROC curve")
glm_simple_roc <- simple_roc(test_set$bad_widget=="TRUE", glm_link_scores)
|
5b8cd9340216c965da0eb1567f737104bbe4f407
|
56a262e561b5d13b2aa47f710a1b04ea385db33d
|
/R/LiDARForestStand.R
|
af2cc62fd62b9e85b7d62cf0fb8328614a4df667
|
[] |
no_license
|
carlos-alberto-silva/rLiDAR
|
f94053414eeebafddd40f5404de32b2de9630488
|
956431248635ef04bc31fa0c1eff4f7b972c5d88
|
refs/heads/master
| 2023-01-22T02:25:27.999142
| 2021-10-04T19:38:20
| 2021-10-04T19:38:20
| 169,238,645
| 10
| 5
| null | 2023-01-11T18:50:33
| 2019-02-05T12:37:11
|
R
|
UTF-8
|
R
| false
| false
| 11,802
|
r
|
LiDARForestStand.R
|
#'3D stand visualization of LiDAR-derived individual trees
#'
#'@description Draws a 3D scatterplot for individual trees detected from Lidar data.
#'
#'@usage LiDARForestStand(crownshape = c("cone", "ellipsoid", "halfellipsoid",
#' "paraboloid", "cylinder"), CL = 4, CW = 8, HCB = 10,
#' X = 0, Y = 0, dbh = 0.3, crowncolor = "forestgreen",
#' stemcolor = "chocolate4", resolution="high", mesh=TRUE)
#'
#'@param crownshape shape of individual tree crown: "cone", "ellipsoid","halfellipsoid", "paraboloid" or "cylinder". Default is "halfellipsoid".
#'@param CL crown length.
#'@param CW crown diameter.
#'@param HCB height at canopy base.
#'@param X x-coordinate.
#'@param Y y-coordinate.
#'@param dbh diameter at breast height (1.73 m).
#'@param crowncolor crown color.
#'@param stemcolor stem color.
#'@param resolution crown resolution: "low", "medium" and "high".
#'@param mesh Logical, if TRUE (default) returns a tree crown mesh model, and if FALSE returns a tree crown line mode.
#'@return Returns a 3-D scatterplot of the individual trees as identified automatically from the LiDAR.
#'@author Carlos Alberto Silva and Remko Duursma. Uses code by Remko Duursma (\emph{Maeswrap} package,see "Plotstand").
#'@references \url{https://maespa.github.io/}
#'@examples
#'
#'\donttest{
#'#=======================================================================#
#'# EXAMPLE 01: Plotting single trees
#'#=======================================================================#
#'
#'# cone crown shape
#'library(rgl)
#'open3d()
#'LiDARForestStand(crownshape = "cone", CL = 10, CW =7,
#' HCB = 5, X =0, Y = 0, dbh = 0.4, crowncolor = "forestgreen",
#' stemcolor = "chocolate4", resolution="high", mesh=TRUE)
#'
#'# ellipsoid crown shape
#'open3d()
#'LiDARForestStand(crownshape = "ellipsoid", CL = 10, CW =7,
#' HCB = 5, X =0, Y = 0, dbh = 0.4, crowncolor = "forestgreen",
#' stemcolor = "chocolate4", resolution="high", mesh=TRUE)
#'
#'# halfellipsoid crown shape
#'open3d()
#'LiDARForestStand(crownshape = "halfellipsoid", CL = 10, CW =7,
#' HCB = 5, X =0, Y = 0, dbh = 0.4, crowncolor = "forestgreen",
#' stemcolor = "chocolate4", resolution="high", mesh=TRUE)
#'
#'# paraboloid crown shape
#'open3d()
#'LiDARForestStand(crownshape = "paraboloid", CL = 10, CW =7,
#' HCB = 5, X =0, Y = 0, dbh = 0.4, crowncolor = "forestgreen",
#' stemcolor = "chocolate4", resolution="high", mesh=TRUE)
#'
#'# cylinder crown shape
#'open3d()
#'LiDARForestStand(crownshape = "cylinder", CL = 10, CW =7,
#' HCB = 5, X =0, Y = 0, dbh = 0.4, crowncolor = "forestgreen",
#' stemcolor = "chocolate4", resolution="high", mesh=TRUE)
#'
#'# Set the shape=FALSE
#'open3d()
#'LiDARForestStand(crownshape = "paraboloid", CL = 10, CW =7,
#' HCB = 5, X =0, Y = 0, dbh = 0.4, crowncolor = "forestgreen",
#' stemcolor = "chocolate4", resolution="high", mesh=FALSE)
#'
#'#=======================================================================#
#'#EXAMPLE 02: Plotting a forest plantation stand in virtual 3-D space
#'#=======================================================================#
#'
#'# Set the dimensions of the displayed forest stand
#'xlength<-30 # x length
#'ylength<-20 # y length
#'
#'# Set the space between trees
#'sx<-3 # x space length
#'sy<-2 # y space length
#'
#'# Tree location grid
#'XYgrid <- expand.grid(x = seq(1,xlength,sx), y = seq(1,ylength,sy))
#'
#'# Get the number of trees
#'Ntrees<-nrow(XYgrid)
#'
#'# Plot a virtual Eucalyptus forest plantation stand using the halfellipsoid tree crown shape
#'
#'# Set stand trees parameters
#'meanHCB<-5 # mean of the height at canopy base
#'sdHCB<-0.1 # standard deviation of the height at canopy base
#'HCB<-rnorm(Ntrees, mean=meanHCB, sd=sdHCB) # height at canopy base
#'CL<-HCB # tree crown height
#'CW<-HCB*0.6 # tree crown diameter
#'
#'open3d() # open a rgl window
#'
#'# Plotting the stand
#'for( i in 1:Ntrees){
#' LiDARForestStand(crownshape = "halfellipsoid", CL = CL[i], CW = CW[i],
#' HCB = HCB[i], X = XYgrid[i,1], Y = XYgrid[i,2], dbh = 0.4,
#' crowncolor = "forestgreen", stemcolor = "chocolate4",
#' resolution="high", mesh=TRUE)
#'}
#'
#'# Add other plot parameters
#'axes3d(c("x-", "x-", "y-", "z"), col="gray") # axes
#'title3d(xlab = "X Coord", ylab = " Y Coord", zlab = "Height", col="red") # title
#'planes3d(0, 0, -1, 0.001, col="gray", alpha=0.7) # set a terrain plane
#'
#'
#'# Plotting a virtual single-species forest plantation stand using "cone" tree crown shape
#'
#'# Set parameters f trees growing within the virtual stand
#'meanHCB<-3 # mean of the height at canopy base
#'sdHCB<-0.1 # standard deviation of the height at canopy base
#'HCB<-rnorm(Ntrees, mean=meanHCB, sd=sdHCB) # height at canopy base
#'CL<-HCB*2.0 # tree crown height
#'CW<-HCB*1.3 # tree crown diameter
#'
#'open3d() # open a rgl window
#'# Plot stand
#'for( i in 1:Ntrees){
#' LiDARForestStand(crownshape = "cone", CL = CL[i], CW = CW[i],
#' HCB = HCB[i], X = XYgrid[i,1], Y = XYgrid[i,2], dbh = 0.4,
#' crowncolor = "forestgreen", stemcolor = "chocolate4",
#' resolution="high", mesh=TRUE)
#'}
#'
#'# Add other plot parameters
#'axes3d(c("x-", "x-", "y-", "z"), col="gray") # axes
#'title3d(xlab = "X Coord", ylab = " Y Coord", zlab = "Height", col="red") # title
#'planes3d(0, 0, -1, 0.001, col="gray", alpha=0.7) # set a terrain plane
#'
#'#=======================================================================#
#'# EXAMPLE 03: Plotting a virtual mixed forest stand
#'#=======================================================================#
#'
#'# 01. Plot different trees species in the stand with different crown shapes
#'
#'# Set the number of trees
#'Ntrees<-80
#'
#'# Set the trees locations
#'xcoord<-sample(1:100, Ntrees) # x coord
#'ycoord<-sample(1:100, Ntrees) # y coord
#'
#'# Set a location grid of trees
#'XYgrid<-cbind(xcoord,ycoord)
#'
#'# Plot the location of the trees
#'plot(XYgrid, main="Tree location")
#'
#'meanHCB<-7 # mean of the height at canopy base
#'sdHCB<-3 # standard deviation of height at canopy base
#'HCB<-rnorm(Ntrees, mean=meanHCB, sd=sdHCB) # height at canopy base
#'crownshape<-sample(c("cone", "ellipsoid","halfellipsoid",
#' "paraboloid"), Ntrees, replace=TRUE) # tree crown shape
#'CL<-HCB*1.3 # tree crown height
#'CW<-HCB # tree crown diameter
#'
#'open3d() # open a rgl window
#'# Plot stand
#'
#'for( i in 1:Ntrees){
#' LiDARForestStand(crownshape = crownshape[i], CL = CL[i], CW = CW[i],
#' HCB = HCB[i], X = as.numeric(XYgrid[i,1]), Y = as.numeric(XYgrid[i,2]),
#' dbh = 0.4, crowncolor = "forestgreen", stemcolor = "chocolate4",
#' resolution="high", mesh=TRUE)
#'}
#'
#'# Add other plot parameters
#'axes3d(c("x-", "x-", "y-", "z"), col="gray") # axes
#'title3d(xlab = "X Coord", ylab = " Y Coord", zlab = "Height", col="red") # title
#'planes3d(0, 0, -1, 0.001, col="gray", alpha=0.7) # set a terrain plane
#'
#'
#'# 02. Plot different tree height in the stand using different crown colors
#'
#'# Set the number of trees
#'Ntrees<-80
#'
#'# Set the tree locations
#'xcoord<-sample(1:100, Ntrees) # x coord
#'ycoord<-sample(1:100, Ntrees) # y coord
#'
#'# Set a location grid of trees
#'XYgrid<-cbind(xcoord,ycoord)
#'
#'# plot the location of the trees
#'plot(XYgrid, main="Tree location")
#'
#'meanHCB<-7 # mean of the height at canopy base
#'sdHCB<-3 # standard deviation of the height at canopy base
#'HCB<-rnorm(Ntrees, mean=meanHCB, sd=sdHCB) # height at canopy base
#'crownshape<-sample(c("cone", "ellipsoid","halfellipsoid", "paraboloid"),
#' Ntrees, replace=TRUE) # tree crown shape
#'CL<-HCB*1.3 # tree crown height
#'CW<-HCB # tree crown diameter
#'
#'# Plot tree height based on the HCB quantiles
#'HCBq<-quantile(HCB) # HCB quantiles
#'crowncolor<-NA*(1:Ntrees) # set an empty crowncolor vector
#'
#'# classify trees by HCB quantile
#'for (i in 1:Ntrees){
#' if (HCB[i] <= HCBq[2]) {crowncolor[i]<-"red"} # group 1
#' if (HCB[i] > HCBq[2] & HCB[i] <= HCBq[3] ) {crowncolor[i]<-"blue"} # group 2
#' if (HCB[i] > HCBq[3] & HCB[i] <= HCBq[4] ) {crowncolor[i]<-"yellow"} # group 3
#' if (HCB[i] >= HCBq[4]) {crowncolor[i]<-"dark green"} # group 4
#'}
#'
#'open3d() # open a rgl window
#'
#'# Plot stand
#'for(i in 1:Ntrees){
#' LiDARForestStand(crownshape = crownshape[i], CL = CL[i], CW = CW[i],
#' HCB = HCB[i], X = as.numeric(XYgrid[i,1]), Y = as.numeric(XYgrid[i,2]),
#' dbh = 0.4, crowncolor = crowncolor[i],stemcolor = "chocolate4",
#' resolution="high", mesh=TRUE)
#'}
#'
#'# Add other plot parameters
#'axes3d(c("x-", "x-", "y-", "z"), col="gray") # axes
#'title3d(xlab = "X Coord", ylab = " Y Coord", zlab = "Height", col="red") # title
#'planes3d(0, 0, -1, 0.001, col="gray", alpha=0.7) # set a terrain plane
#'
#'}
#'@export
#'@importFrom geometry convhulln
#'@importFrom rgl plot3d open3d bg3d rgl.triangles
LiDARForestStand <- function(crownshape = c("cone", "ellipsoid", "halfellipsoid", "paraboloid", "cylinder"),
                             CL = 4, CW = 8, HCB = 10, X = 0, Y = 0, dbh = 0.3, crowncolor = "forestgreen",
                             stemcolor = "chocolate4", resolution = "high", mesh = TRUE)
{
  # ---- input validation ------------------------------------------------
  # match.arg() validates the shape and resolves the full default vector to
  # its first entry ("cone").  The original chained `!=` test errored on
  # modern R when the default length-5 vector was passed (if() condition of
  # length > 1) and only ran match.arg() in the mesh branch.
  crownshape <- match.arg(crownshape)
  # is.numeric() accepts both double and integer input; the original
  # class(x) != "numeric" comparison wrongly rejected integers
  # (class "integer").
  if (!is.numeric(HCB)) stop("The HCB parameter is invalid. It is not a numeric parameter")
  if (!is.numeric(X))   stop("The X parameter is invalid. It is not a numeric parameter")
  if (!is.numeric(Y))   stop("The Y parameter is invalid. It is not a numeric parameter")
  # Original message said "HCB" here (copy-paste error); it now names dbh.
  if (!is.numeric(dbh)) stop("The dbh parameter is invalid. It is not a numeric parameter")
  if (resolution != "high" && resolution != "medium" && resolution != "low") {
    stop("The resolution parameter is invalid. It must be 'high', 'medium' or 'low'")
  }
  if (!is.logical(mesh)) stop("The mesh parameter is invalid. It must be a TRUE or FALSE logical statement")

  # ---- crown tessellation density (vertical x angular subdivisions) ----
  if (resolution == "low")    { nz <- 15; nalpha <- 15 }
  if (resolution == "medium") { nz <- 25; nalpha <- 25 }
  if (resolution == "high")   { nz <- 40; nalpha <- 40 }

  if (isTRUE(mesh)) {
    # Solid (mesh) rendering: the crown shape plus a conical stem.
    H <- HCB + CL                   # total tree height
    dbase <- dbh * (H / (H - 1.3))  # stem base diameter scaled up from dbh at 1.3 m
    if (!is.finite(dbase))
      dbase <- dbh                  # guard against H == 1.3 (division by zero)
    m1 <- coord3dshape(crownshape, CW = CW, CL = CL, z0 = HCB, x0 = X,
                       y0 = Y, nz = nz, nalpha = nalpha)
    m2 <- coord3dshape("cone", CW = dbase, CL = H, z0 = 0, x0 = X,
                       y0 = Y, nz = nz, nalpha = nalpha)
    interpol(m1, col = crowncolor)
    interpol(m2, col = stemcolor)
  } else {
    # Wireframe (line) rendering is delegated to TreesModel().
    TreesModel(crownshape = crownshape, CW = CW, CL = CL, z0 = 0, HCB = HCB, x0 = X,
               y0 = Y, nz = nz, nalpha = nalpha, dbh = dbh, crowncolor = crowncolor,
               stemcolor = stemcolor)
  }
}
|
39bec8f3f49e5b5c8016c669fc3fc2885cf367bd
|
af9ab6ba9d4f4d33d68cd47ec2dfb4e178deb517
|
/similate_RW_no_migr_range.R
|
5dd73cca2d9e08067ede582d931008d9c9de5370
|
[] |
no_license
|
diego-ellis-soto/Simulation_the_spread_of_poop
|
acb8aff71859f9d6030cc1ef923d595419314b61
|
6da3c9f406adb1334369686bd765280f30e8e781
|
refs/heads/master
| 2020-04-05T16:53:43.434239
| 2018-11-13T03:16:45
| 2018-11-13T03:16:45
| 157,033,340
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,059
|
r
|
similate_RW_no_migr_range.R
|
# Seed-dispersal movement simulation ("spread of poop"): a walker moves
# once per day inside a fixed (non-migratory) winter/summer range and
# defecates on day 1, then every `avg_gut_retention_time`-th day after
# (1, 1 + t, 1 + 2t, ...).
# NOTE(review): the notes mention an average of 624 guava seeds per event,
# but the seed columns are only initialised to zero in this function --
# presumably they are filled in elsewhere; TODO confirm.
#
# Arguments:
#   ndays                  - number of daily timesteps to simulate
#   daily_distance_moved   - variance of the Gaussian step (random walk) or
#                            minimum step length l0 (Levy walk)
#   range_shp              - SpatialPolygons* range the walk must stay inside
#   avg_gut_retention_time - days between successive poop events
#   movement_model         - "random_walk" or "levy_walk"
#   defined_start_point    - optional SpatialPoints start location; a random
#                            point inside range_shp is drawn when NULL
# Returns a data frame with one row per day (Longitude, Latitude,
# Poop_event flag, zero-initialised seed columns) and draws diagnostic
# plots as a side effect.
rw_within_homerange = function(ndays, daily_distance_moved, range_shp, avg_gut_retention_time, movement_model,defined_start_point = NULL){
message('May I suggest setting a seed?')
if(is.null(defined_start_point)){
message('No start point specified \nMaking a random point inside the non migratory range')
sp = spsample(range_shp, 1, 'random') # starting point: a random point inside the range polygon
}else{
sp = defined_start_point
}
# Days on which a defecation event occurs.
days_of_poop_event <- seq(1, ndays,
avg_gut_retention_time)
steps.df <- data.frame(matrix(0,ndays,6)) # one row per simulated day
# Columns: coordinates, poop-event flag, and seed bookkeeping columns
# (the three seed columns stay at zero within this function).
colnames(steps.df) <- c("Longitude", "Latitude", 'Poop_event',
'NSeeds', 'Nseeds_germinated', 'N_seedling_to_shrub')
if(movement_model == 'random_walk'){
cat('Random Walk choosen')
# Ignacio: How do I add 20 000 individuals on top?
for(i in 1:ndays){ # i = 1; i =2
# print(i)
if(i == 1){
# First step: Gaussian step from the origin; the start-point offset is
# added to all rows after the loop.
# IGNACIO HELP HERE: HOW DO I DO CUMSUM INSIDE A LOOP?
xy = mvrnorm(1, c(0,0), matrix(c(daily_distance_moved,0,0,daily_distance_moved),2,2))
steps.df[i, 1] <- xy[1] # + sp@coords[1] # Add the random walk with longitude
steps.df[i, 2] <- xy[2] # + sp@coords[2]
if(i %in% days_of_poop_event){
message('Day ', i, ' had a poop event')
steps.df[i, 3] <- 1 # 1 means a poop event
}
}else{
# Subsequent steps: Gaussian step centred on the previous position.
xy = mvrnorm(1, c(steps.df[(i-1),1], steps.df[(i-1),2]), matrix(c(daily_distance_moved,0,0,daily_distance_moved),2,2))
require(rgeos)
tmp_xy = data.frame(Longitude = ( xy[1] + sp@coords[1]),
Latitude = ( xy[2] + sp@coords[2] ))
tmp_xy <- SpatialPoints(tmp_xy)
proj4string(tmp_xy) <- proj4string(range_shp)
# NOTE(review): `next` leaves this day's row at (0, 0) instead of
# redrawing the step -- the trailing comment says "draw the step
# again" but the code skips the day.
if(gContains(range_shp,tmp_xy) == FALSE){
cat('Step was outside the species range, skipping it')
next
} # If the random step is outside the polygon, draw the step again
# point.in.polygon(tmp_xy@coords[1],tmp_xy@coords[2], bbox(range_shp))
# plot(range_shp)
# plot(tmp_xy, col = 'green', add=T)
# tmp_xy %over% range_shp
# any( !is.na(over(tmp_xy, range_shp)))
# inside.park <- !is.na(over(bears, as(parks, "SpatialPolygons")))
steps.df[i, 1] <- xy[1] # + sp@coords[1] # Add the random walk with longitude
steps.df[i, 2] <- xy[2] # + sp@coords[2]
if(i %in% days_of_poop_event){
message('Day ', i, ' had a poop event')
steps.df[i, 3] <- 1 # 1 means a poop event
}
}
}
# Translate the whole walk so that it starts at the chosen start point.
steps.df[, 1] = steps.df[, 1] + sp@coords[1] # Add the random walk with longitude
steps.df[, 2] = steps.df[, 2] + sp@coords[2]
# Build a spatial object for plotting.  The first `proj` assignment is
# immediately overwritten by the range's own projection string.
proj = "+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0"
proj = proj4string(range_shp)
xysp <- SpatialPointsDataFrame(coords= steps.df[,c('Longitude', 'Latitude')], data = steps.df,proj4string = CRS(proj))
last_point = tail(xysp,1)
last_point = SpatialPoints(last_point)
proj4string(last_point) = proj
# Diagnostic plots: range with track overlaid, then the track alone with
# start (red), end (blue) and poop events (brown triangles).
par(mfrow=c(1,1))
plot(range_shp)
points(xysp, type = 'l')
points(last_point, col = 'blue' , pch =16)
points( sp, col = 'red', pch = 16)
plot(xysp@coords, type = 'l', main = paste0( movement_model ,' in wintering range'), lwd = 1.5)
points( sp, col = 'red', pch = 16) # first point
points(last_point, col = 'blue', pch = 16) # last point
xysp %>% subset(Poop_event == 1) %>%
points(col = 'brown', pch = 24, bg = alpha('chocolate4', 0.4 ))
plot(range_shp, add=T)
# points(tail(xysp@coords,1), col = 'blue', pch = 16)
}
if(movement_model == 'levy_walk'){
require(adehabitatLT)
# SIMULATE LEVY WALK INSIDE THE BOUNDING BOX
# NOTE(review): unlike the random walk above, the Levy walk is NOT
# constrained to stay inside range_shp.
lv4 = simm.levy(date = 1:ndays, mu = 3, l0 = daily_distance_moved, x0 = c(sp@coords), # minimum length of a step l0
id = "A1", burst = "mu = 3.0", typeII = TRUE,
proj4string=CRS("+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0"))
steps.df = (ld(lv4))
steps.df = steps.df[,c(1:2)] # keep only lat long
steps.df$Poop_event = 0
steps.df[days_of_poop_event,]$Poop_event <- 1
steps.df$Nseeds = 0
steps.df$Nseeds_germinated = 0
steps.df$N_seedling_to_shrub = 0
steps.df$Nseeds_germinated # NOTE(review): no-op expression, result discarded
# As above, the first `proj` assignment is immediately overwritten.
proj = "+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0"
proj = proj4string(range_shp)
xysp <- SpatialPointsDataFrame(coords= steps.df[,c('x', 'y')], data = steps.df,proj4string = CRS(proj))
names(steps.df)[1] ='Longitude'
names(steps.df)[2] ='Latitude'
last_point = tail(xysp,1)
last_point = SpatialPoints(last_point)
proj4string(last_point) = proj
# Same diagnostic plots as the random-walk branch.
par(mfrow=c(1,1))
plot(range_shp)
points(xysp, type = 'l')
points(last_point, col = 'blue' , pch =16)
points( sp, col = 'red', pch = 16)
plot(xysp@coords, type = 'l', main = paste0( movement_model ,' in wintering range'), lwd = 1.5)
points( sp, col = 'red', pch = 16) # first point
points(last_point, col = 'blue', pch = 16) # last point
xysp %>% subset(Poop_event == 1) %>%
points(col = 'brown', pch = 24, bg = alpha('chocolate4', 0.4 ))
plot(range_shp, add=T)
}
# Alternatively the starting point can be the centroid of the homerange
# steps.df[i, 1] <- xy[1] + sp@coords[1] # Add the random walk with longitude
# steps.df[i, 2] <- xy[1] + sp@coords[2]
# xysp <- SpatialPoints(steps.df)
# proj4string(xysp) <- proj4string(range_shp)
return(steps.df)
}
|
05322b7ee58e8233ec048bf49517373743929e13
|
32082d5417b956c162cc37f81c86557cdf085900
|
/PreliminaryCalcsFunction.R
|
44de36bcc3834365746979fdbfedf8189fcc3190
|
[] |
no_license
|
vshanks/ghg_emissions_predictions
|
dcf2923244bb341ed75a7c5b5d4efb443246d0ad
|
162f96d688ec4ca0655a97eeda94632987eb0475
|
refs/heads/main
| 2022-12-14T13:31:52.492644
| 2020-09-10T19:03:08
| 2020-09-10T19:03:08
| 294,498,796
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,154
|
r
|
PreliminaryCalcsFunction.R
|
PreliminaryCalcsFunction <- function(CurrentDirectory, carbon_data){
raw <- carbon_data
# See the variables
# colnames(raw)
# Select some for analysis
Subsector <- as.character(raw$RBICS.subsector.Code)
TotalGHG <- raw$Total.GHG.emssion..1..2.
Scope1 <- raw$GHG.Scope.1
Scope2 <- raw$GHG.Scope.2
Revenue <- as.numeric(as.character(raw$PPP..Revenue))
Employees <- as.numeric(as.character(raw$Number.of.employees))
FixedAssets <- as.numeric(as.character(raw$Gross.Fixed.Assets..PPP...millions.))
IntangibleAssets <- as.numeric(as.character(raw$Disclosed.Intangible.Assets..PPP...millions.))
TotalPower <- as.numeric(as.character(raw$TOTAL_POWER_GENERATED))
Country <- as.character(raw$Headquarterscountry)
Used <- !is.na(TotalGHG) # Data available
# Some preliminary plots
hist(log(TotalGHG[Used]))
plot( log(TotalPower[Used]),log(TotalGHG[Used]))
plot( log(Employees[Used]),log(TotalGHG[Used]))
plot( log(Revenue[Used]),log(TotalGHG[Used]))
# Normalize by revenue
TotalGHGbyRevenue <- TotalGHG / Revenue
EmployeesByRevenue <- Employees / Revenue
AssetsByRevenue <- FixedAssets / Revenue
IntangiblesByRevenue <- IntangibleAssets / Revenue
TotalPowerByRevenue <- TotalPower / Revenue
plot(log(EmployeesByRevenue[Used]), log(TotalGHGbyRevenue[Used]))
plot(log(AssetsByRevenue[Used]), log(TotalGHGbyRevenue[Used]))
# Now look at subsectors
GHGbySubsectorsplit <- split(log(TotalGHGbyRevenue[Used]), Subsector[Used])
hist(sapply(GHGbySubsectorsplit,length))
hist(sapply(GHGbySubsectorsplit,median))
# Order Subsectors by median GHG
MedSectorGHGsplit <- sapply(GHGbySubsectorsplit,median,na.rm=TRUE)
MedSectorGHGsplit[is.na(MedSectorGHGsplit)] <- min(MedSectorGHGsplit,na.rm=TRUE)
OrderMeds <- order(MedSectorGHGsplit)
RankMeds <- rank(MedSectorGHGsplit,ties.method = "random")
SubsectorRank <- unsplit(RankMeds,Subsector[Used])
plot(SubsectorRank, log(TotalGHGbyRevenue[Used]))
# Create variables for analysis
LUTotGHGbyRev <- log(TotalGHG / Revenue)[Used]
LUEmpByRev <- log(Employees / Revenue)[Used]
LUAssetsByRev <- log(FixedAssets / Revenue)[Used]
LUIntangByRev <- log(IntangibleAssets / Revenue)[Used]
LUTotPowByRev <- log(TotalPower / Revenue)[Used]
library(rpart)
fit0 <- rpart(LUTotGHGbyRev ~ LUEmpByRev + LUAssetsByRev + LUIntangByRev + LUTotPowByRev + SubsectorRank,cp=0.005)
plot(fit0)
#text(fit0, use.n = TRUE, all = TRUE,cex=0,5)
text(fit0,splits=TRUE,cex=0.5,digits=2,use.n=TRUE)
# Order Counties by median GHG
GHGbyCountrysplit <- split(log(TotalGHGbyRevenue[Used]), Country[Used])
MedCountryGHGsplit <- sapply(GHGbyCountrysplit,median,na.rm=TRUE)
MedCountryGHGsplit[is.na(MedCountryGHGsplit)] <- min(MedCountryGHGsplit,na.rm=TRUE)
OrderMeds <- order(MedCountryGHGsplit)
RankMeds <- rank(MedCountryGHGsplit,ties.method = "random")
CountryRank <- unsplit(RankMeds,Country[Used])
plot(CountryRank, log(TotalGHGbyRevenue[Used]))
fit1 <- rpart(LUTotGHGbyRev ~ LUEmpByRev + LUAssetsByRev + LUIntangByRev + LUTotPowByRev + SubsectorRank + CountryRank,cp=0.005)
plot(fit1)
#text(fit0, use.n = TRUE, all = TRUE,cex=0,5)
text(fit1,splits=TRUE,cex=0.5,digits=2,use.n=TRUE)
plot(fit1,branch=0.6,compress=T,uniform=T)
#text(fit0, use.n = TRUE, all = TRUE,cex=0,5)
text(fit1,splits=TRUE,cex=0.5,digits=2,use.n=TRUE)
plot(fit1$frame$yval[fit1$where], LUTotGHGbyRev[!is.na(LUTotGHGbyRev)])
fit2 <- rpart(LUTotGHGbyRev ~ LUEmpByRev + LUAssetsByRev + LUIntangByRev + LUTotPowByRev + SubsectorRank + CountryRank, minbucket = 5,cp=0.00001)
plot(fit2$frame$yval[fit2$where], LUTotGHGbyRev[!is.na(LUTotGHGbyRev)])
fit3 <- rpart(LUTotGHGbyRev ~ LUEmpByRev + LUAssetsByRev + LUIntangByRev + LUTotPowByRev + SubsectorRank + CountryRank, minbucket = 5,cp=0.00001)
# Now fit all
# Extend SubsectorRank
SubsectorRankA <- rep(0,length(Used))
# split used subsectors by rank
ssbr <- split(Subsector[Used],SubsectorRank)
srmbr <- split(SubsectorRank,SubsectorRank)
first <- function(x){
x[1]
}
rsmbr <- sapply(srmbr,first)
for(i in 1: length(ssbr)){
SubsectorRankA[Subsector %in% ssbr[[i]]] <- rsmbr[i]
}
# for all in split,
# Extend Country Rank
CountryRankA <- rep(0,length(Used))
# split used Countrys by rank
ssbr <- split(Country[Used],CountryRank)
srmbr <- split(CountryRank,CountryRank)
first <- function(x){
x[1]
}
rsmbr <- sapply(srmbr,first)
for(i in 1: length(ssbr)){
CountryRankA[Country %in% ssbr[[i]]] <- rsmbr[i]
}
LUTotGHGbyRevA <- log(TotalGHG / Revenue)
LUEmpByRevA <- log(Employees / Revenue)
LUAssetsByRevA <- log(FixedAssets / Revenue)
LUIntangByRevA <- log(IntangibleAssets / Revenue)
LUTotPowByRevA <- log(TotalPower / Revenue)
fit3 <- rpart(LUTotGHGbyRevA ~ LUEmpByRevA + LUAssetsByRevA + LUIntangByRevA + LUTotPowByRevA + SubsectorRankA + CountryRankA, minbucket = 5,cp=0.01,weights = Used)
plot(fit3$frame$yval[fit3$where], LUTotGHGbyRev[!is.na(LUTotGHGbyRev)])
fit4 <- rpart(LUTotGHGbyRevA ~ LUEmpByRevA , minbucket = 5,cp=0.0001,weights = Used)
fit4 <- rpart(LUTotGHGbyRevA ~ LUEmpByRevA + LUAssetsByRevA + LUIntangByRevA + LUTotPowByRevA + SubsectorRankA + CountryRankA, minbucket = 5,cp=0.000001,weights = Used)
par(mfrow=c(2,1))
plot(fit4$frame$yval[fit4$where], LUTotGHGbyRev[!is.na(LUTotGHGbyRev)])
# Compute fitted values for all the records
newdata <- list(LUEmpByRevA = LUEmpByRevA,LUAssetsByRevA = LUAssetsByRevA,LUIntangByRevA=LUIntangByRevA, LUTotPowByRevA = LUTotPowByRevA, SubsectorRankA = SubsectorRankA,CountryRankA=CountryRankA )
pred <- predict(fit4,newdata)
#Check predictions match fitted values
junk <- (pred[Used])[!is.na(LUTotGHGbyRev)] - fit4$frame$yval[fit4$where]
plot(fit4$frame$yval[fit4$where],junk)
# Miscellaneous other plots
par(mfrow=c(1,1))
fit4 <- rpart(LUTotGHGbyRevA ~ LUEmpByRevA + LUAssetsByRevA + LUIntangByRevA + LUTotPowByRevA + SubsectorRankA + CountryRankA, minbucket = 5,cp=0.000001,weights = Used)
#fit4 <- rpart(LUTotGHGbyRevA ~ LUEmpByRevA + LUAssetsByRevA + LUIntangByRevA + LUTotPowByRevA + SubsectorRankA + CountryRankA, minbucket = 5,cp=0.01,weights = Used)
fit4 <- rpart(LUTotGHGbyRevA ~ LUAssetsByRevA + SubsectorRankA , minbucket = 5,cp=0.005,weights = Used)
plot(fit4$frame$yval[fit4$where], LUTotGHGbyRev[!is.na(LUTotGHGbyRev)],pch=".")
plot(SubsectorRankA[Used], LUAssetsByRevA[Used],pch=".")
############ ERROR HERE
# plot(SubsectorRankA[Used], fit4$frame$yval[fit4$where],pch=".")
# Now save the good plots
plotfile <- paste(CurrentDirectory,"/PreliminaryPlots%03d.pdf",sep="")
pdf(file=plotfile, height = 4, width = 6,onefile=FALSE)
fit4 <- rpart(LUTotGHGbyRevA ~ LUAssetsByRevA + SubsectorRankA , minbucket = 5,cp=0.005,weights = Used)
main="GHG fit by Subsector and Assets"
y <- fit4$frame$yval
GoodUsed <- !is.na(LUTotGHGbyRev)
Y <- LUTotGHGbyRevA[Used][GoodUsed]
x1 <- SubsectorRankA[Used][GoodUsed]
x2 <- LUAssetsByRevA[Used][GoodUsed]
x1name <- "Subsector"
x2name <- "Assets"
eps1 <- 0.01 * (max(x1,na.rm = TRUE) - min(x1,na.rm=TRUE))
eps2 <- 0.01 * (max(x2,na.rm = TRUE) - min(x2,na.rm=TRUE))
loc <- fit4$where
uniloc <-unique(loc)
col = terrain.colors(floor(length(y) * 1.25))
par(mfrow=c(1,1))
plot(x1,x2,cex=0.5,
col=col[rank(y,ties.method="first")[loc]],
main= main,
xlab= x1name,
ylab= x2name)
for( i in 1:length(uniloc)){
these <- (loc == uniloc[i])
#if(sum(these,na.rm=TRUE) > 0){
if(length(these) > 0){
mx1<- min(x1[these],na.rm=TRUE) - eps1
Mx1 <- max(x1[these],na.rm=TRUE) + eps1
mx2<- min(x2[these],na.rm=TRUE) - eps2
Mx2 <- max(x2[these],na.rm=TRUE) + eps2
lines(c(mx1,mx1,Mx1,Mx1,mx1), c(mx2,Mx2,Mx2,mx2,mx2),
col=col[rank(y,ties.method="first")[uniloc[i]]])
print(c(i, uniloc[i],mx1,Mx1,mx2,Mx2))
}
}
fit5 <- rpart(LUTotGHGbyRevA ~ LUEmpByRevA + LUAssetsByRevA + LUIntangByRevA + LUTotPowByRevA + SubsectorRankA + CountryRankA, minbucket = 5,cp=0.000001,weights = Used)
y <- fit5$frame$yval
main="GHG fit by Many variables"
GoodUsed <- !is.na(LUTotGHGbyRev)
Y <- LUTotGHGbyRevA[Used][GoodUsed]
x1 <- SubsectorRankA[Used][GoodUsed]
x2 <- LUAssetsByRevA[Used][GoodUsed]
x1name <- "Subsector"
x2name <- "Assets"
eps1 <- 0.01 * (max(x1,na.rm = TRUE) - min(x1,na.rm=TRUE))
eps2 <- 0.01 * (max(x2,na.rm = TRUE) - min(x2,na.rm=TRUE))
loc <- fit5$where
uniloc <-unique(loc)
col = terrain.colors(floor(length(y) * 1.25))
par(mfrow=c(1,1))
plot( y[loc],Y,cex=0.5,
main=main,
xlab="Fitted Values",
ylab="GHG",
#col=terrain.colors(floor(length(Y) * 1.25))[rank(Y,ties.method="first")]
col=col[rank(y,ties.method="first")[loc]]
)
plot(x1,x2,cex=0.5,
col=col[rank(y,ties.method="first")[loc]],
main= main,
xlab= x1name,
ylab= x2name)
for( i in 1:length(uniloc)){
these <- (loc == uniloc[i])
#if(sum(these,na.rm=TRUE) > 0){
if(length(these) > 0){
mx1<- min(x1[these],na.rm=TRUE) - eps1
Mx1 <- max(x1[these],na.rm=TRUE) + eps1
mx2<- min(x2[these],na.rm=TRUE) - eps2
Mx2 <- max(x2[these],na.rm=TRUE) + eps2
lines(c(mx1,mx1,Mx1,Mx1,mx1), c(mx2,Mx2,Mx2,mx2,mx2),
col=col[rank(y,ties.method="first")[uniloc[i]]])
print(c(i, uniloc[i],mx1,Mx1,mx2,Mx2))
}
}
dev.off()
}
|
25ec5fa601b85efa3b709d31d06139ea2437674c
|
36c806d7529594cf933b19278d2741b83509fd96
|
/MToolBox_config_files/Mtoolbox.R
|
fbfac3231172063c4059e0a506f8de3d8bac1816
|
[
"MIT"
] |
permissive
|
Phillip-a-richmond/AnnotateVariants
|
98c33b361ca8ee1bf2ac82b7c8d02188ec41efd8
|
dddbb245f7348b119460607d59ca6cba4afa72f3
|
refs/heads/master
| 2023-04-13T13:14:02.664815
| 2023-03-20T23:35:21
| 2023-03-20T23:35:21
| 103,580,004
| 16
| 4
|
MIT
| 2021-05-17T23:58:27
| 2017-09-14T20:46:56
|
Shell
|
UTF-8
|
R
| false
| false
| 9,157
|
r
|
Mtoolbox.R
|
#!/usr/bin/env Rscript
## Script to filter and prioritize MT variants identified with MToolBox.
## Based on Maddie Couse Variant Prioritization : https://team.bcchr.ca/display/TGA/MToolbox+Mitochondrial+Analysis
## Developed by Solenne Correard on March 29, 2019
## Last update: SC, April 3rd, 2019
##
## Expected layout (relative to the directory the script is run from):
##   - exactly one pedigree (.ped) file, and
##   - one MToolBox_<sample>/OUT_<sample>/ directory per sample holding a
##     *.annotation.csv file.
## Output: a prioritized variant table written to MToolBox_annotated.txt.
library(plyr)
# Remember the results directory; all paths below are relative to it
ResultsDirectory=getwd()
setwd(ResultsDirectory)
# Read the pedigree file; the script only proceeds with a single ped file
pedfiles=list.files(pattern=".ped")
if (length(pedfiles)>1) {
print ("ERROR, several ped files")
} else {
# Create one filtered csv per affected member of the family
# (ped column 6 is the affection status; "2" = affected)
ped=read.csv(pedfiles, sep="\t", header=TRUE, na.strings=c("","NA"))
Affected=subset(ped, ped[,6]=="2")
Number_of_affected=nrow(Affected)
for (i in (1: Number_of_affected)){
# ped column 2 is the individual ID; strip the genome-build suffix
Proband_ID_i<- gsub("_GRCh38", "", Affected[i,2])
## Open the MToolBox annotation csv of this affected individual
MToolBox_files=list.files(pattern="MToolBox_")
if (length(MToolBox_files)<1) {
print ("ERROR, no MToolBox files")
} else {
proband_i_directory=paste0("MToolBox_", Proband_ID_i,"/OUT_", Proband_ID_i)
setwd(proband_i_directory)
annotation_CSV_proband_i=list.files(pattern=".annotation.csv")
csv_proband_i=read.csv(annotation_CSV_proband_i, sep="\t", header=TRUE, na.strings=c("","NA"))
setwd(ResultsDirectory)
## Keep variants with (HF > 0.18 or missing HF) and Nt.Variability < 0.0026
## (i.e. conserved sites with LOW nucleotide variability are kept)
csv_proband_i_filtered= subset(csv_proband_i, ((csv_proband_i$HF>0.18 | is.na(csv_proband_i$HF)) & csv_proband_i$Nt.Variability <0.0026))
nrow(csv_proband_i)
nrow(csv_proband_i_filtered)
# Rename the sample-specific columns with the proband ID; suffix "_p" marks
# proband columns, prefix "var_" marks variant-level annotation columns
colnames(csv_proband_i_filtered)=c("Sample_p", ".Variant.Allele", paste0(Proband_ID_i, "_HF_p"), paste0(Proband_ID_i, "_CI_lower.CI_upper_p"), "var_RSRS", "var_MHCS", "var_rCRS", "var_Haplogroup", "var_Other.Haplogroups", "var_Locus", "var_Nt.Variability", "var_Codon.Position", "var_Aa.Change", "var_Aa.Variability", "var_tRNA.Annotation", "var_Disease.Score", "var_RNA.predictions", "var_MutPred.pred", "var_MutPred.prob", "var_PolyPhen.2.HumDiv.pred", "var_PolyPhen.2.HumDiv.prob", "var_PolyPhen.2.HumVar.pred", "var_PolyPhen.2.HumVar.prob", "var_PANTHER.pred", "var_PANTHER.prob", "var_PhD.SNP.pred", "var_PhD.SNP.prob", "var_SNPs.GO.pred", "var_SNPs.GO.prob", "var_Mitomap.Associated.Disease.s.", "var_Mitomap.Homoplasmy", "var_Mitomap.Heteroplasmy", "var_Somatic.Mutations", "var_SM.Homoplasmy", "var_SM.Heteroplasmy", "var_ClinVar", "var_OMIM.link", "var_dbSNP.ID", "var_Mamit.tRNA.link", "var_PhastCons20Way", "var_PhyloP20Way", "var_AC.AN.1000.Genomes", "var_X1000.Genomes.Homoplasmy", "var_X1000.Genomes.Heteroplasmy")
write.table(csv_proband_i_filtered, file=paste0("MToolBox_annotated_p", Proband_ID_i), sep="\t", row.names = FALSE, quote=FALSE)
}
}
# Create the csv for the mother of the family
# (ped column 4 is the mother's ID; "-9" means no mother recorded)
Mother_ID <- gsub("_GRCh38", "", Affected[1,4])
if (Mother_ID=="-9") {
print ("No Mother in the ped file")
} else {
setwd(paste0("MToolBox_", Mother_ID,"/OUT_", Mother_ID, "/"))
annotation_CSV_mother=list.files(pattern=".annotation.csv")
csv_mother=read.csv(annotation_CSV_mother, sep="\t", header=TRUE, na.strings=c("","NA"))
setwd(ResultsDirectory)
# Rename the sample-specific columns with the mother ID (suffix "_m");
# the mother table is NOT filtered so inheritance can be checked later
colnames(csv_mother)=c("Sample_m", ".Variant.Allele", paste0(Mother_ID, "_HF_m"), paste0(Mother_ID, "_CI_lower.CI_upper_m"), "var_RSRS", "var_MHCS", "var_rCRS", "var_Haplogroup", "var_Other.Haplogroups", "var_Locus", "var_Nt.Variability", "var_Codon.Position", "var_Aa.Change", "var_Aa.Variability", "var_tRNA.Annotation", "var_Disease.Score", "var_RNA.predictions", "var_MutPred.pred", "var_MutPred.prob", "var_PolyPhen.2.HumDiv.pred", "var_PolyPhen.2.HumDiv.prob", "var_PolyPhen.2.HumVar.pred", "var_PolyPhen.2.HumVar.prob", "var_PANTHER.pred", "var_PANTHER.prob", "var_PhD.SNP.pred", "var_PhD.SNP.prob", "var_SNPs.GO.pred", "var_SNPs.GO.prob", "var_Mitomap.Associated.Disease.s.", "var_Mitomap.Homoplasmy", "var_Mitomap.Heteroplasmy", "var_Somatic.Mutations", "var_SM.Homoplasmy", "var_SM.Heteroplasmy", "var_ClinVar", "var_OMIM.link", "var_dbSNP.ID", "var_Mamit.tRNA.link", "var_PhastCons20Way", "var_PhyloP20Way", "var_AC.AN.1000.Genomes", "var_X1000.Genomes.Homoplasmy", "var_X1000.Genomes.Heteroplasmy")
write.table(csv_mother, file=paste0("MToolBox_annotated_m", Mother_ID), sep="\t", row.names = FALSE, quote=FALSE)
}
# Merge the per-sample tables according to the family structure found above
list_filtered_csv_p=list.files(pattern="MToolBox_annotated_p")
list_filtered_csv_m=list.files(pattern="MToolBox_annotated_m")
if (length(list_filtered_csv_p)==1 & length(list_filtered_csv_m)==0){
# Singleton: only one proband file; order by disease score and write out
proband_csv = read.csv(list_filtered_csv_p, sep="\t", header=TRUE, na.strings=c("","NA"))
proband_csv_ordered= proband_csv[order(proband_csv$var_Disease.Score), ]
# Reorder the columns to fit the excel template
proband_csv_ordered_ordered= proband_csv_ordered[c(2, 1 , 3, 4, 10, 37, 11, 13, 14, 36, 38, 30, 31, 32, 33, 34, 35, 42, 43, 44, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 40, 41, 5, 6, 7, 8, 9, 12, 15, 39)]
write.table(proband_csv_ordered_ordered, file="MToolBox_annotated.txt", sep="\t", row.names = FALSE, quote=FALSE)
} else if (length(list_filtered_csv_p)>1 & length(list_filtered_csv_m)==0){
# No mother, several affected individuals: outer-join probands on the variant allele
csv_filtered_merged = read.csv(list_filtered_csv_p[1], sep="\t", header=TRUE, na.strings=c("","NA"))
for (i in 2:length(list_filtered_csv_p)){
currentFile = read.csv(list_filtered_csv_p[i], sep="\t", header=TRUE, na.strings=c("","NA"))
csv_filtered_merged = merge(csv_filtered_merged, currentFile, by=".Variant.Allele", all=TRUE)
# plyr::rename keeps a stable "Sample_p" column name across successive merges
csv_filtered_merged =rename(csv_filtered_merged, c("Sample_p.x"="Sample_p"))
}
} else if (length(list_filtered_csv_p)==1 & length(list_filtered_csv_m)==1){
# Mother and one affected individual: left-join mother columns onto the proband
proband_csv = read.csv(list_filtered_csv_p, sep="\t", header=TRUE, na.strings=c("","NA"))
mother_csv = read.csv(list_filtered_csv_m, sep="\t", header=TRUE, na.strings=c("","NA"))
csv_filtered_merged = merge(proband_csv, mother_csv, by=".Variant.Allele", all.x=TRUE)
} else if (length(list_filtered_csv_p)>1 & length(list_filtered_csv_m)==1){
# Mother and several affected individuals: merge probands first, then the mother
mother_csv = read.csv(list_filtered_csv_m, sep="\t", header=TRUE, na.strings=c("","NA"))
csv_filtered_merged_p = read.csv(list_filtered_csv_p[1], sep="\t", header=TRUE, na.strings=c("","NA"))
for (i in 2:length(list_filtered_csv_p)){
currentFile = read.csv(list_filtered_csv_p[i], sep="\t", header=TRUE, na.strings=c("","NA"))
csv_filtered_merged_p = merge(csv_filtered_merged_p, currentFile, by=".Variant.Allele", all=TRUE)
csv_filtered_merged_p =rename(csv_filtered_merged_p, c("Sample_p.x"="Sample_p"))
}
csv_filtered_merged = merge(csv_filtered_merged_p, mother_csv, by=".Variant.Allele", all.x=TRUE)
} else {
print ("No solution with the files present")
}
## Write the final file only when it does not exist yet
list_final_files=list.files(pattern="MToolBox_annotated.txt")
if (length(list_final_files)==0) {
# Order the variants by disease score
# (threshold for significance: disease score > 0.4311)
csv_filtered_merged_ordered= csv_filtered_merged[order(csv_filtered_merged$var_Disease.Score.x), ]
## Collapse the per-sample "Sample_" columns into one comma-separated column
csv_filtered_merged_ordered_sample= csv_filtered_merged_ordered[grep("Sample_",colnames(csv_filtered_merged_ordered))]
Samples=t(t(apply(csv_filtered_merged_ordered_sample, 1, paste, collapse=",")))
## Collapse the per-sample HF (heteroplasmic fraction) columns likewise
csv_filtered_merged_ordered_HF= csv_filtered_merged_ordered[grep("_HF_",colnames(csv_filtered_merged_ordered))]
HF=t(t(apply(csv_filtered_merged_ordered_HF, 1, paste, collapse=",")))
## Collapse the per-sample confidence-interval columns likewise
csv_filtered_merged_ordered_CI= csv_filtered_merged_ordered[grep("_CI_lower.CI_upper_",colnames(csv_filtered_merged_ordered))]
CI_lower_CI_upper=t(t(apply(csv_filtered_merged_ordered_CI, 1, paste, collapse=",")))
## Append the collapsed columns to the merged data frame
csv_filtered_merged_ordered_merged=cbind(csv_filtered_merged_ordered, Samples, HF, CI_lower_CI_upper)
## Select the columns of interest
## NOTE(review): head() keeps only the first 6 rows of the table here --
## confirm this truncation is intended and not a leftover from debugging.
csv_filtered_merged_ordered_head=head(csv_filtered_merged_ordered_merged[,c(".Variant.Allele", "Samples", "HF", "CI_lower_CI_upper",colnames(csv_filtered_merged_ordered_merged)[grep(".x",colnames(csv_filtered_merged_ordered_merged))])])
## Reorder the columns to fit with the excel template
csv_filtered_merged_ordered_head_ordered <- csv_filtered_merged_ordered_head[c(1, 2, 3, 4, 10, 37, 11, 13, 14, 36, 38, 30, 31, 32, 33, 34, 35, 42, 43, 44, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 40, 41, 5, 6, 7, 8, 9, 12, 15, 39)]
write.table(csv_filtered_merged_ordered_head_ordered, file="MToolBox_annotated.txt", sep="\t", row.names = FALSE, quote=FALSE)
}}
# Remove the intermediate per-sample files
list_file_to_remove=list.files(pattern="MToolBox_annotated_")
file.remove(list_file_to_remove)
|
ad245dd089ef9ee9bfdfc17a2200522d71e0569a
|
5b173d65c0efe16c3c0a58dc6e49e2f3ac26d001
|
/segRNAcountings/R/seg_criteria.R
|
4823e5bbf73c782734b26869285d258391d27b0c
|
[] |
no_license
|
danilodurs/newsegcrit
|
2a5bdb35a565b29242c216572ccb3cf5948c6da9
|
22c4a5bb39f44b7d5216771d72fd6fcb3c98a21a
|
refs/heads/master
| 2020-03-26T21:46:29.033866
| 2018-08-20T11:21:32
| 2018-08-20T11:21:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,319
|
r
|
seg_criteria.R
|
#' Log-transform RNA-seq counts and rescale by an estimated noise SD
#'
#' Applies log(x + 1) to the counts, estimates the noise standard
#' deviation of the transformed signal with Hall's difference-based
#' estimator, and returns the log-counts divided by that estimate.
#'
#' @param rna.data a numeric vector of counts from RNA-sequencing
#'
#' @return a numeric vector of log-transformed, SD-rescaled data
#'
#'
#' @examples
#' log.data <- log.transform(dataset1)
#' plot(dataset1, type = "l")
#' plot(log.data, type = "l")
log.transform <- function(rna.data)
{
  shifted.log <- log(rna.data + 1)
  n <- length(rna.data)
  ## Hall difference-based estimator of the residual SD: each column of
  ## `weighted` holds the four Hall weights applied to the series, with
  ## rows 2..4 shifted left by 1..3 positions.
  hall.weights <- c(0.1942, 0.2809, 0.3832, -0.8582)
  weighted <- hall.weights %*% t(shifted.log)
  weighted[2, -n] <- weighted[2, -1]
  weighted[3, -c(n - 1, n)] <- weighted[3, -c(1, 2)]
  weighted[4, -c(n - 2, n - 1, n)] <- weighted[4, -c(1, 2, 3)]
  ## Drop the last three (incompletely shifted) columns, sum the weighted
  ## terms per position, and pool the squares into one SD estimate.
  col.sums <- apply(weighted[, -c(n - 2, n - 1, n)], 2, sum)
  est.sd <- sqrt(sum(col.sums^2) / (n - 3))
  shifted.log / est.sd
}
#' Index of a penalty among the vector of tested intermediate penalties
#'
#' @param lambda a penalty value
#' @param o.crops the output object from CROPS.RFPOP_; its first element is
#'   a matrix whose second row holds the tested penalties in increasing order
#'
#' @return the index (an integer >= 1) of the largest tested penalty that is
#'   <= \code{lambda}; a \code{lambda} below every tested penalty maps to 1
#'
#' @examples
#' log.t1 <- log.transform(dataRNA[,1])
#' crops.out <- CROPS.RFPOP_(log.t1, min_pen = 5, max_pen = 10, lthreshold = 3)
#' getIndexLambda(lambda = 10*log(n), o.crops = crops.out)
#'
getIndexLambda <- function(lambda, o.crops){
  ## Count how many tested penalties (row 2 of the CROPS penalty matrix)
  ## lambda reaches; clamp to 1 so a lambda below the whole tested range
  ## still selects the first segmentation.
  ## (The unused `n.seg` local of the original version has been removed,
  ## and the roxygen example now calls getIndexLambda, not `getIndex`.)
  index <- sum(o.crops[[1]][2, ] <= lambda)
  if (index == 0) {
    return(1)
  }
  index
}
#' Segment a list of replica datasets over a penalty range
#'
#' Runs the CROPS scan on every replica, pools every intermediate penalty
#' found, and records -- for each replica and each pooled penalty -- the
#' corresponding changepoints and smoothed signal.
#'
#' @param list.rna.data a list of datasets, one per replica
#' @param penalty_range a vector of length 2: the min and max penalties
#' @param mc.cores number of cores forwarded to \code{mclapply} (1 by default)
#'
#' @return a "SEGMENTATIONS" object: a list of (1) changepoints per replica
#'   and per intermediate penalty, (2) smoothed signals per replica and per
#'   intermediate penalty, and (3) the vector of intermediate penalties.
#'
#' @examples
#' l.data <- list(log.transform(dataset1), log.transform(dataset2))
#' list.res <- seg.func(list.rna.data = l.data, penalty_range = c(10, 100))
#'
seg.func <- function(list.rna.data, penalty_range, mc.cores = 1)
{
  ## Run the CROPS scan on every replica (possibly in parallel).
  crops <- mclapply(list.rna.data, FUN = function(rna.data) {
    CROPS.RFPOP_(data = rna.data, min_pen = penalty_range[1], max_pen = penalty_range[2])
  }, mc.cores = mc.cores)
  ## Pool the intermediate penalties found across all replicas.
  intermediate.penalties <- sort(unique(unlist(
    lapply(crops, FUN = function(x) x[[1]][2, ])
  )))
  ## For one CROPS result, pick slot `slot` (2 = changepoints, 3 = smoothed
  ## signal) of the segmentation matching each pooled penalty.
  pick.per.penalty <- function(o.crops, slot) {
    lapply(intermediate.penalties, FUN = function(lambda) {
      o.crops[[slot]][[getIndexLambda(lambda, o.crops)]]
    })
  }
  list.tau.results <- lapply(crops, FUN = pick.per.penalty, slot = 2)
  list.smt.results <- lapply(crops, FUN = pick.per.penalty, slot = 3)
  list.results <- list(list.tau.results, list.smt.results, intermediate.penalties)
  class(list.results) <- "SEGMENTATIONS"
  list.results
}
#' Mean squared error between replica segmentations, per penalty
#'
#' @param list.seg a "SEGMENTATIONS" object (see \code{seg.func})
#'
#' @return a matrix of class "MSE": one row per pair of replicas holding the
#'   pairwise MSE of their smoothed signals, then a row with the mean over
#'   pairs, and a final row with the intermediate penalties; one column per
#'   penalty.
#'
#' @examples
#' list.res <- seg.func(list.rna.data = l.data, penalty_range = c(10, 100))
#' mse.res <- mse.penalties(list.res)
#'
mse.penalties <- function(list.seg)
{
  ## Element 2 of a SEGMENTATIONS object: smoothed signals, indexed first
  ## by replica and then by penalty.
  smt.by.replica <- list.seg[[2]]
  replica.pairs <- combn(1:length(smt.by.replica), 2, simplify = F)
  matrix.results <- NULL
  for (pen.idx in 1:length(smt.by.replica[[1]])) {
    ## Collect the smoothed signal of every replica for this penalty.
    per.replica <- list()
    for (rep.idx in 1:length(smt.by.replica)) {
      per.replica[[rep.idx]] <- smt.by.replica[[rep.idx]][[pen.idx]]
      cat(".")
    }
    ## Pairwise MSE between replicas, one value per pair.
    col.mse <- NULL
    for (pr in replica.pairs) {
      col.mse <- c(col.mse, mean((per.replica[[pr[1]]] - per.replica[[pr[2]]])^2))
      cat("+")
    }
    matrix.results <- cbind(matrix.results, col.mse)
    cat("*")
  }
  ## Append the mean over pairs and the penalties as the last two rows.
  matrix.results <- rbind(matrix.results, colMeans(matrix.results))
  matrix.results <- rbind(matrix.results, list.seg[[3]])
  class(matrix.results) <- "MSE"
  return(matrix.results)
}
#' Computing NID (normalized information distance) depending on penalty
#'
#' @param list.seg a "SEGMENTATIONS" object (see \code{seg.func})
#'
#' @return a matrix of results: one row per pair of replicas, then a row with
#'   the mean over pairs, and a final row with the intermediate penalties;
#'   the columns represent the NID value for each penalty.
#'
#' @examples
#' l.d1 <- log.transform(dataset1)
#' l.d2 <- log.transform(dataset2)
#' l.d3 <- log.transform(dataset3)
#' l.data <- list(l.d1,l.d2,l.d3)
#' list.res <- seg.func(list.rna.data=l.data,penalty_range=c(10,100))
#' nid.res <- nid.penalties(list.res)
#'
nid.penalties <- function(list.seg)
{
## NOTE(review): NID() is not defined in this file -- presumably provided by
## an external clustering-comparison package (e.g. aricode); confirm.
list.tau.results <- list.seg[[1]]
datasets.index <- 1:length(list.tau.results)
combins <- combn(datasets.index,2,simplify = F)
matrix.results <- NULL
# Pick the changepoint set of each replica, for each penalty (beta)
i <- 1
# for each beta
while( i <= length(list.tau.results[[1]]) )
{
j <- 1
datasets.list <- list()
# changepoint vectors of all replicas for this beta
while(j <= length(list.tau.results) )
{
datasets.list[[j]] <- list.tau.results[[j]][[i]]
j <- j+1
cat(".")
}
# Turn each changepoint vector into a per-position segment-label vector
l <- 1
all.seg.class <- list()
for(tauset in datasets.list)
{
tauset <- c(0,tauset)
# positions (tauset[k]+1)..tauset[k+1] all get label k
k <- 1
seg.class <- NULL
while(k <= (length(tauset)-1))
{
seg.class <- c(seg.class,rep(k,length((tauset[k]+1):tauset[k+1])))
k <- k + 1
}
all.seg.class[[l]] <- seg.class
l <- l+1
}
# NID between the label vectors of every pair of replicas
col.nid <- NULL
for(combin in combins)
{
nid.res <- NID(all.seg.class[[combin[1]]],all.seg.class[[combin[2]]])
col.nid <- c(col.nid, nid.res)
cat("+")
}
matrix.results <- cbind(matrix.results,col.nid)
i <- i+1
cat("*")
}
# Append the mean over pairs and the penalty values as the last two rows
matrix.results <- rbind( matrix.results , colMeans(matrix.results))
matrix.results <- rbind( matrix.results , list.seg[[3]])
class(matrix.results) <- "NID"
return(matrix.results)
}
#' Plot the MSE criterion against the penalty values
#'
#' @param mse.res an "MSE" object as returned by \code{mse.penalties}: a
#'   matrix whose last row holds the intermediate penalties and whose
#'   second-to-last row holds the mean MSE over replica pairs.
#'
#' @return \code{NULL} (invisibly); called for its side effect, a step plot.
#' @examples plot(mse.res)
plot.MSE <- function(mse.res)
{
  ## x axis: penalties (last row); y axis: mean MSE (row above it).
  ## nrow() replaces the original, less readable length(mse.res[, 1]).
  last.row <- nrow(mse.res)
  plot(x = mse.res[last.row, ], y = mse.res[last.row - 1, ],
       xlab = "Penalty", ylab = "MSE", type = "s", col = "red")
}
#' Plot the NID criterion against the penalty values
#'
#' @param nid.res a "NID" object as returned by \code{nid.penalties}: a
#'   matrix whose last row holds the intermediate penalties and whose
#'   second-to-last row holds the mean NID over replica pairs.
#'   (The original roxygen documented a nonexistent \code{mse.res} argument.)
#'
#' @return \code{NULL} (invisibly); called for its side effect, a step plot.
#' @examples plot(nid.res)
#'
plot.NID <- function(nid.res)
{
  ## x axis: penalties (last row); y axis: mean NID (row above it).
  ## nrow() replaces the original, less readable length(nid.res[, 1]).
  last.row <- nrow(nid.res)
  plot(x = nid.res[last.row, ], y = nid.res[last.row - 1, ],
       xlab = "Penalty", ylab = "NID", type = "s", col = "red")
}
#' Compute a model-selection criterion for RNA segmentations
#'
#' @param list.seg a "SEGMENTATIONS" object
#' @param criterion a string naming the criterion: "MSE" or "NID"
#'
#' @return the selected criterion matrix (also plotted as a side effect),
#'   or \code{NULL} for an unrecognized criterion name
#'
#' @examples
#' list.res <- seg.func(list.rna.data = l.data, penalty_range = c(10, 100))
#' crit.res <- seg.criteria(list.res, criterion = "MSE")
#' crit.res2 <- seg.criteria(list.res, criterion = "NID")
#'
seg.criteria <- function(list.seg, criterion)
{
  ## Dispatch on the criterion name; anything else leaves crit.res NULL.
  crit.res <- NULL
  if (criterion == "MSE") {
    crit.res <- mse.penalties(list.seg)
  } else if (criterion == "NID") {
    crit.res <- nid.penalties(list.seg)
  }
  ## Plot only when a criterion was actually computed (both branches of the
  ## original plotted their result; unknown names plotted nothing).
  if (!is.null(crit.res)) {
    plot(crit.res)
  }
  crit.res
}
#' The average counts per segment for each replica
#'
#' The changepoints of all replicas are pooled into one common set of
#' segment boundaries; the mean of every dataset is then computed on each
#' resulting segment.
#'
#' @param list.tau list of changepoint vectors (one per replica), for a
#'   given penalty value.
#' @param list.data list of datasets (one per replica).
#'
#' @return a matrix with one row per replica and one column per segment,
#'   holding the average count of that replica on that segment.
#'
#' @examples
#' counts.per.seg(list.tau = list(c(2, 4)),
#'                list.data = list(c(1, 2, 3, 4), c(10, 20, 30, 40)))
counts.per.seg <- function(list.tau, list.data)
{
  ## Pool all changepoints (sort(unique(.)) replaces the original
  ## sort(x[!duplicated(x)])) and prepend 0, so that segment i spans
  ## (bounds[i] + 1) : bounds[i + 1].
  bounds <- c(0, sort(unique(unlist(list.tau))))
  n.seg <- length(bounds) - 1
  ## Mean of one dataset on every segment.
  seg.means <- function(dataset) {
    vapply(seq_len(n.seg),
           function(i) mean(dataset[(bounds[i] + 1):bounds[i + 1]]),
           numeric(1))
  }
  ## Build all rows at once instead of growing the matrix with rbind in a
  ## loop, and drop the meaningless "row.seg.mean" rownames the original
  ## implementation produced.
  mat.res <- do.call(rbind, lapply(list.data, seg.means))
  if (!is.null(mat.res)) {
    rownames(mat.res) <- NULL
  }
  mat.res
}
#' Segmenting a dataset using rob_seg, over a penalty range scanned by a CROPS algorithm
#'
#' CROPS (Changepoints for a Range Of PenaltieS) repeatedly solves the
#' penalized segmentation problem at chosen penalties and subdivides the
#' penalty range until every distinct segmentation between \code{min_pen}
#' and \code{max_pen} has been found.
#' NOTE(review): Rob_seg.std is not defined in this file -- presumably from
#' the robseg package; confirm.
#'
#' @param data The dataset, a vector
#' @param min_pen minimum value of the penalty range
#' @param max_pen maximum value of the penalty range
#' @param lthreshold the threshold used to detect and rescale the outliers among the dataset (3 by default)
#'
#' @return A list: respectively a matrix of penalties (the 2nd line contains the intermediate penalties),
#' a list of segmentations for each intermediate lambda and
#' a list of smt for each intermediate lambda (they are obviously all the same...)
#'
#' @examples
#' log.t1 <- log.transform(dataRNA[,1])
#' crops.out <- CROPS.RFPOP_(log.t1,min_pen = 5,max_pen = 10,lthreshold = 3)
#' View(crops.out)
#'
CROPS.RFPOP_ <- function(data, min_pen = 5, max_pen = 20, lthreshold = 3) {
# NOTE(review): NCALC, overall_cost, b_between and cost.test below are
# assigned but never used -- apparently leftovers from an earlier version.
NCALC=0
pen_interval <- c(min_pen, max_pen)
n <- length(data)
test_penalties <- NULL
numberofchangepoints <- NULL
penal <- NULL
overall_cost <- array()
segmentations <- NULL
segmentations.smt <- NULL
#thetas=NULL
b_between <- array()
count <- 0
# Main CROPS loop: solve at every pending penalty, then subdivide intervals
# whose endpoints differ by more than one changepoint.
while (length(pen_interval) > 0){
new_numcpts <- array()
new_penalty <- array()
new_cpts <- array()
new_smts <- list()
#new.theta<-array()
# Solve the robust segmentation at each pending penalty value.
for (b in 1:length(pen_interval)) {
#ans <- Fpop(data,pen_interval[b])
ans <- Rob_seg.std(data, loss="Outlier", lambda=pen_interval[b], lthreshold = lthreshold)
## >> GR ADD FOR ROBSEG : compute unpenalized error
ans$J.est <- ans$cost[length(data)] - pen_interval[b]*length(ans$t.est)
## << GR ADD FOR ROBSEG
resultingcpts <- ans$t.est ##ERROR CORRECTED HERE
new_numcpts[b] <- length(resultingcpts)-1
cost.test <- array()
new_cpts[b] <- list(resultingcpts)
new_smts[[b]] <- ans$smt
# new.theta[b]=list(ans[[5]])
new_penalty[b] <- ans$J.est
}
# Progress reporting (the bound below comes from the CROPS theory).
if (count == 0){
print(paste("Maximum number of runs of algorithm = ", new_numcpts[1] - new_numcpts[2] + 2, sep = ""))
count <- count + length(new_numcpts)
print(paste("Completed runs = ", count, sep = ""))
}else{
count <- count + length(new_numcpts)
print(paste("Completed runs = ", count, sep = ""))
}
## Add the values calculated to the already stored values
test_penalties <- unique((sort(c(test_penalties,pen_interval))))
new_numcpts <- c(numberofchangepoints,new_numcpts)
new_penalty <- c(penal,new_penalty)
new_cpts <- c(segmentations, new_cpts)
new_smts <- c(segmentations.smt, new_smts)
#new.theta <- c(thetas,new.theta)
numberofchangepoints <- -sort(-new_numcpts) ##can use sort to re-order
penal <- sort(new_penalty)
# Reorder the stored segmentations so they stay aligned with the
# decreasing changepoint counts above.
ls <- array()
for (l in 1:length(new_cpts)){
ls[l] <- length(new_cpts[[l]])
}
ls1 <- sort(ls,index.return = T, decreasing = T)
ls1 <- ls1$ix
segmentations <- new_cpts[c(ls1)]
segmentations.smt <- new_smts[c(ls1)]
#thetas=new.theta[c(ls1)]
## compute new candidate penalties from the CROPS interval formula
pen_interval <- NULL
tmppen_interval <- NULL
for (i in 1:(length(test_penalties)-1)){
if(abs(numberofchangepoints[i]-numberofchangepoints[i+1])>1){ ##only need to add a beta if difference in cpts>1
j <- i+1
tmppen_interval <- (penal[j] - penal[i]) * ((numberofchangepoints[i] - numberofchangepoints[j])^-1)
pen_interval <- c(pen_interval, tmppen_interval )
}
}
## discard candidate penalties too close (< 1e-2) to an already tested one
if(length(pen_interval)>0){
for(k in length(pen_interval):1){
if(min(abs(pen_interval[k]-test_penalties)) < 1e-2) {
pen_interval=pen_interval[-k]
}
}
}
}
## PRUNE VALUES WITH SAME num_cp: keep one penalty per distinct segmentation
for(j in length(test_penalties):2){
if(numberofchangepoints[j]==numberofchangepoints[j-1]){
numberofchangepoints=numberofchangepoints[-j]
test_penalties=test_penalties[-j]
penal=penal[-j]
segmentations = segmentations[-j]
segmentations.smt = segmentations.smt[-j]
#thetas=thetas[-j]
}
}
### Calculate beta intervals: [beta.int[k], beta.e[k]] is the penalty range
### over which segmentation k stays optimal.
nb=length(test_penalties)
beta.int=rep(0,nb)
beta.e=rep(0,nb)
for(k in 1:nb){
if(k==1){
beta.int[1]=test_penalties[1]
}else{
beta.int[k]=beta.e[k-1]
}
if(k==nb){
beta.e[k]=test_penalties[k]
}else{
beta.e[k]=(penal[k]-penal[k+1])/(numberofchangepoints[k+1]-numberofchangepoints[k])
}
}
return(list(rbind(test_penalties,beta.int,numberofchangepoints,penal),segmentations, segmentations.smt))
}
|
3b4770d75bf74871b9b63ec822bd715aead90ec8
|
2161e2c9b1463f3f0b8d27a9447c136e5e08d2b9
|
/man/getDesign.Rd
|
64d1c1b3c2b7742a07b9deb26e8f4e3cf3d118c3
|
[] |
no_license
|
NCRN/NCRNbirds
|
14a258e8182849bb0434eb4368fa291105d56a7c
|
5a512b736d674d9308c27667e7a99b142aebfcef
|
refs/heads/master
| 2023-08-16T13:00:26.367713
| 2023-07-11T15:54:50
| 2023-07-11T15:54:50
| 32,335,489
| 5
| 12
| null | 2023-08-17T15:09:47
| 2015-03-16T15:44:44
|
R
|
UTF-8
|
R
| false
| true
| 1,277
|
rd
|
getDesign.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getDesign.R
\name{getDesign}
\alias{getDesign}
\title{getDesign}
\usage{
getDesign(object, info)
}
\arguments{
\item{object}{An \code{NCRNbirds} object or a list of such objects.}
\item{info}{A length one character vector. Indicates which aspect of the study design should be returned.
\describe{
\item{"visits"}{The number of visits typcially made to a point during a monitoring season. Retrieved from the \code{VisitNumber} slot.}
\item{"bands"}{The distance bands used for the study. Retrieved from the \code{Bands} slot.}
\item{"intervals"}{The time intervals used for the study. Retrieved from the \code{VisitNumber} slot.}
}}
}
\description{
Returns information on the study design stored in an \code{NCRNbirds} object.
}
\details{
This function returns information about the study design used to collect the data. The type of information is
determined by the \code{info} argument. When "visits" is selected the output will be a length one numeric vector; the other
two options will return a \code{data.frame}. If the input is a \code{list} of \code{NCRNbird} objects, then the output will be either a
list of vectors or a list of \code{data.frame}s depending on the \code{info} argument.
}
|
5aec6af13d02ad6a33e5667781ff7903312bfd6e
|
bad08314942d890670cb8186827e93387f8242cb
|
/R/HLgofTest.R
|
37ad7bf070b684bf4f0546eba8f3696175d89fb1
|
[] |
no_license
|
stamats/MKmisc
|
faaa5a4bc04d015143fcd2d468bc11aa12ef5633
|
e738e1f1b18899af42c1149335c6ee063e9de80c
|
refs/heads/master
| 2022-11-25T06:06:56.692986
| 2022-11-19T15:35:13
| 2022-11-19T15:35:13
| 33,780,395
| 10
| 2
| null | 2015-06-29T18:02:53
| 2015-04-11T15:13:48
|
R
|
UTF-8
|
R
| false
| false
| 2,875
|
r
|
HLgofTest.R
|
HLgof.test <- function (fit, obs, ngr = 10, X, verbose = FALSE){
  ## Hosmer-Lemeshow C and H goodness-of-fit statistics for a binary
  ## regression model, plus (when the model matrix X is supplied) the
  ## le Cessie-van Houwelingen-Copas-Hosmer global unweighted sum of
  ## squares test.
  ##
  ## Arguments:
  ##   fit     - numeric vector of fitted probabilities in (0, 1).
  ##   obs     - vector of observed 0/1 outcomes (same length as fit).
  ##   ngr     - number of groups for the C and H statistics.
  ##   X       - (optional) model matrix for the global test.
  ##   verbose - if TRUE, print the grouping tables.
  ##
  ## Value: a list with components C and H (class "htest"), plus gof
  ## when X is given.
  ngr1 <- ngr
  ## --- Hosmer-Lemeshow C statistic: groups from quantiles of fit ---
  brks <- unique(quantile(fit, probs = seq(0, 1, by = 1/ngr)))
  cutfit <- cut(fit, breaks = brks, include.lowest = TRUE)
  if(length(brks) < ngr+1){
    ## Ties in fit can collapse quantile groups; shrink df accordingly.
    ## (Typo "Lemesho" in the original warning message fixed.)
    warning("Found only ", length(brks)-1, " different groups for Hosmer-Lemeshow C statistic.")
    ngr <- length(brks)-1
  }
  if(verbose){
    cat("Groups for Hosmer-Lemeshow C statistic:\n")
    print(table(cutfit))
  }
  ## Observed and expected counts of 0s and 1s per group.
  ## (Expected-table label fixed from letter-O "Os" to "0s" to match Obs.)
  Obs <- xtabs(cbind("0s" = 1 - obs, "1s" = obs) ~ cutfit)
  Exp <- xtabs(cbind("0s" = 1 - fit, "1s" = fit) ~ cutfit)
  chisq <- sum((Obs - Exp)^2/Exp, na.rm = TRUE)
  names(chisq) <- "X-squared"
  param <- ngr-2
  names(param) <- "df"
  P <- 1 - pchisq(chisq, param)
  ## --- Hosmer-Lemeshow H statistic: ngr1 equal-width groups ---
  cutfit1 <- cut(fit, breaks = ngr1, include.lowest = TRUE)
  if(verbose){
    cat("Groups for Hosmer-Lemeshow H statistic:\n")
    print(table(cutfit1))
  }
  Obs1 <- xtabs(cbind(1 - obs, obs) ~ cutfit1)
  Exp1 <- xtabs(cbind(1 - fit, fit) ~ cutfit1)
  chisq1 <- sum((Obs1 - Exp1)^2/Exp1, na.rm = TRUE)
  names(chisq1) <- "X-squared"
  param1 <- ngr1-2
  names(param1) <- "df"
  P1 <- 1 - pchisq(chisq1, param1)
  dname <- paste(deparse(substitute(fit)), "and", deparse(substitute(obs)))
  C <- structure(list(statistic = chisq, parameter = param,
                      p.value = P, method = "Hosmer-Lemeshow C statistic", data.name = dname,
                      observed = Obs, expected = Exp), class = "htest")
  H <- structure(list(statistic = chisq1, parameter = param1,
                      p.value = P1, method = "Hosmer-Lemeshow H statistic", data.name = dname,
                      observed = Obs1, expected = Exp1), class = "htest")
  if(!missing(X)){
    ## --- le Cessie-van Houwelingen-Copas-Hosmer global test ---
    ## Standardize the sum of squared errors against its expectation and
    ## SD under H0, via a weighted regression of d = 1 - 2p on X.
    y <- obs == 1
    p <- fit
    sse <- sum((y - p)^2)
    wt <- p * (1 - p)
    d <- 1 - 2 * p
    z <- lm.wfit(X, d, wt, method = "qr")
    res <- z$residuals * sqrt(z$weights)
    sd.sse <- sqrt(sum(res^2))  # renamed from `sd`, which masked stats::sd
    ev <- sum(wt)
    z <- (sse - ev)/sd.sse
    names(z) <- "z"
    P2 <- 2 * pnorm(abs(z), lower.tail = FALSE)
    ## Removed dead code: the original built an unused `stats` vector here
    ## that wrongly stored the C-statistic p-value P instead of P2.
    gof <- structure(list(statistic = z, p.value = P2,
                          method = "le Cessie-van Houwelingen-Copas-Hosmer global goodness of fit test",
                          data.name = dname,
                          observed = sse, expected = ev), class = "htest")
    return(list(C = C, H = H, gof = gof))
  }
  list(C = C, H = H)
}
|
06444122bbb114511fbfcab1ba26276b6d29643e
|
edee4a9c4cf3c35a52dfc99ac53279ab23e069ab
|
/examples/FeatureCollection/vector_symbology.R
|
88a391a89cbc14c13ff36c36ce4eaa3334c86e92
|
[
"Apache-2.0"
] |
permissive
|
benardonyango/rgee
|
a8dd22a72f2c77a0d1e88f6177c740942fe2cfbc
|
e9e0f2fa7065e79c1c794bd7387fd0af633031ff
|
refs/heads/master
| 2022-04-09T18:10:23.689798
| 2020-03-31T10:56:00
| 2020-03-31T10:56:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 357
|
r
|
vector_symbology.R
|
# Earth Engine (rgee) example: paint vector feature outlines onto an image.
library(rgee)
# ee_reattach() # reattach ee as a reserved word
ee_Initialize()
# Load US state boundaries and paint their outlines (3 px wide) onto an
# empty image; painted pixels get value 1.
fc = ee$FeatureCollection('TIGER/2018/States')
image = ee$Image()$paint(
featureCollection = fc,
color = 1,
width = 3
)
# Center the map over the central US and display the outlines in red.
Map$setCenter(-99.844, 37.649, zoom = 5)
Map$addLayer(
eeObject = image,
visParams = list(palette = 'FF0000'),
name = 'TIGER/2018/States')
|
77548822eea1c8452f63229fea111417450f0db3
|
25524de30d715f1464789860405e6d98d4e71159
|
/hw1.R
|
9d9c4abc9073b3173d8a1a0dc858d38318451219
|
[] |
no_license
|
sunnyhyo/Multivariate-Statistical-Analysis
|
1d3e5f46418de4dcadd2240f383d35aa51fc8648
|
4ebfd44670ab8b1f30a2f238603b1515b0877e33
|
refs/heads/master
| 2021-04-26T23:02:13.698323
| 2018-05-28T01:45:19
| 2018-05-28T01:45:19
| 123,919,384
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 657
|
r
|
hw1.R
|
# hw1 — matrix algebra exercises on a symmetric 3x3 matrix.
library(Matrix)

A <- matrix(c(1, 2, 1,
              2, 3, 2,
              1, 2, 6), ncol = 3, byrow = TRUE)
A

# Reconstruct a symmetric matrix from its eigendecomposition: M = V D V'.
spectral_form <- function(M) {
  decomp <- eigen(M)
  decomp$vectors %*% diag(decomp$values) %*% t(decomp$vectors)
}

# 1 rank(A)
rankMatrix(A)
# 2 eigenvalues, eigenvectors
eigen(A)
# 3 spectral decomposition
spectral_form(A)
# 4 trace(A)
sum(diag(A))
# 5 A inverse
A.inv <- solve(A)
A.inv
# 6 eigenvalues, eigenvectors of A inverse
eigen(A.inv)
# 7 spectral decomposition of A inverse
spectral_form(A.inv)
# 8 AA'
B <- tcrossprod(A)
B
# 9 eigenvalues and eigenvectors of AA'
eigen(B)
# 10 inverse of AA'
solve(B)
# 11 AA
C <- A %*% A
C
# 12 spectral decomposition of AA
spectral_form(C)
|
5f511b1047e1beb097b6114e719ecadb168607fb
|
abdccf6134a4a9bac2ebe27e4fc1c449eab8b1f1
|
/newdateversion.R
|
12b8c71015c483f8a858aed6b91ae152afb68234
|
[] |
no_license
|
carolinemsinclair/OMFSBillingDynamics
|
f0de69cefce97aaf5434f2d5aef08446268a9501
|
8b037fcf31872a1d4ca925c73cd73f6d495d6597
|
refs/heads/main
| 2023-03-21T16:28:04.822302
| 2021-03-09T18:08:26
| 2021-03-09T18:08:26
| 346,099,148
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,918
|
r
|
newdateversion.R
|
#attemptwithxls
# Package dependencies: tidyverse readers/wranglers plus lubridate for dates.
library(readxl)
library(lubridate)
library(dbplyr)
library(dplyr)
library(tidyr)
library(readr)
# Reference tables used by the fee lookups below.
drugs=read.csv("updateddrugs.csv")            # drug prices keyed by id in Column2 (see search.drugs)
blood=read.csv("blood.csv",header = FALSE)    # headerless: code in V1, price in V2
wcprice=read.csv("WCprice.csv")               # WC codes with yearly price columns X2014..X2019
weird2007codes<-read.csv("weird2007codes.csv",header=FALSE)  # headerless: code in V1, price in V2
OMFS2003<-read.csv("RVU2003.csv")             # 2003 RVU schedule: code in V2, price in V7
# Quarterly OMFS fee tables, one per quarter 2014-2019, each with
# columns HPCCodes (procedure code) and OMFS (price); consumed by search.OMFS().
OMFSJan14<-read.csv("OMFSJan14.csv")
OMFSApr14<-read.csv("OMFSApr14.csv")
OMFSJul14<-read.csv("OMFSJul14.csv")
OMFSOct14<-read.csv("OMFSOct14.csv")
OMFSJan15<-read.csv("OMFSJan15.csv")
OMFSApr15<-read.csv("OMFSApr15.csv")
OMFSJul15<-read.csv("OMFSJul15.csv")
OMFSOct15<-read.csv("OMFSOct15.csv")
OMFSJan16<-read.csv("OMFSJan16.csv")
OMFSApr16<-read.csv("OMFSApr16.csv")
OMFSJul16<-read.csv("OMFSJul16.csv")
OMFSOct16<-read.csv("OMFSOct16.csv")
OMFSJan17<-read.csv("OMFSJan17.csv")
OMFSApr17<-read.csv("OMFSApr17.csv")
OMFSJul17<-read.csv("OMFSJul17.csv")
OMFSOct17<-read.csv("OMFSOct17.csv")
OMFSJan18<-read.csv("OMFSJan18.csv")
OMFSApr18<-read.csv("OMFSApr18.csv")
OMFSJul18<-read.csv("OMFSJul18.csv")
OMFSOct18<-read.csv("OMFSOct18.csv")
OMFSJan19<-read.csv("OMFSJan19.csv")
OMFSApr19<-read.csv("OMFSApr19.csv")
OMFSJul19<-read.csv("OMFSJul19.csv")
OMFSOct19<-read.csv("OMFSOct19.csv")
####Find code in row input####
# Compute the OMFS amount for one bill row (applied row-wise via apply()).
# rowinput: a character vector; per the column names assigned later in this
# file, [1] is the date of service (DOS), [3] the procedure code, [6] the
# units and [7] the charges. Rows without a DOS yield "".
findcode <- function(rowinput) {
  dos <- as.character(rowinput[1])
  if (dos == "") {
    return("")
  }
  OMFS(codedate = substr(dos, 1, 10),
       code = as.character(rowinput[3]),
       units = as.character(rowinput[6]),
       charges = as.character(rowinput[7]))
}
####OMFS type####
# Resolve the fee amount for one service line, dispatching on the code prefix:
#   * "99070…"  -> drug code: price from the drugs table, else billed charges
#   * "ML"/"ml" -> med-legal: always priced at the billed charges
#   * "WC"/"wc" -> workers'-comp code: yearly price from wcprice (2014-2019
#                  only; earlier WC dates leave OMFS.amount at 0.00)
#   * otherwise -> quarterly fee schedule via search.OMFS() on the first five
#                  code characters, falling back to billed charges
# codedate is an "mm/dd/yyyy" string; units and charges are character values.
OMFS <-function (codedate,code,units,charges) {
  parsed.date<-mdy(codedate)
  OMFS.amount=0.00
  OMFS.temp=0.00
  if (startsWith(code,"99070")){ #drug code
    # NOTE(review): if search.drugs() finds no match it returns numeric(0);
    # nchar(numeric(0))==0 is then length zero and this if() errors — confirm
    # that unmatched drug codes cannot occur in practice.
    OMFS.temp=search.drugs(code)
    if(nchar(OMFS.temp)==0){
      OMFS.amount=parse_number(charges)
    }
    else{
      OMFS.amount=OMFS.temp
    }
    return(OMFS.amount)
  }
  else{
    if(startsWith(code,"ML") | startsWith(code, "ml")){ #med legal code
      # Med-legal services are billed at charges, not at a schedule price.
      OMFS.amount=parse_number(charges)
    }
    else{
      if(startsWith(code,"WC")|startsWith(code,"wc")){
        # Year-keyed lookup in the wcprice table (columns X2014..X2019).
        if (year(parsed.date)>2013){
          if (year(parsed.date)==2014){
            OMFS.amount=wcprice$X2014[wcprice$code==code]
          }
          if (year(parsed.date)==2015){
            OMFS.amount=wcprice$X2015[wcprice$code==code]
          }
          if (year(parsed.date)==2016){
            OMFS.amount=wcprice$X2016[wcprice$code==code]
          }
          if (year(parsed.date)==2017){
            OMFS.amount=wcprice$X2017[wcprice$code==code]
          }
          if (year(parsed.date)==2018){
            OMFS.amount=wcprice$X2018[wcprice$code==code]
          }
          if (year(parsed.date)==2019){
            OMFS.amount=wcprice$X2019[wcprice$code==code]
          }
        }
      }
      else{
        # Quarterly OMFS lookup on the 5-character base code; fall back to
        # the billed charges when no (positive) schedule price is found.
        # NOTE(review): search.OMFS() returns NULL for dates outside its
        # supported spans; is.na(NULL) has length zero, which makes this
        # condition error on recent R — confirm such dates cannot reach here.
        OMFS.temp=search.OMFS(parsed.date,substr(code,1,5),units)
        if(is.na(OMFS.temp)||nchar(OMFS.temp)==0 || OMFS.temp==0.00){
          OMFS.amount=parse_number(charges)
        }
        else{
          OMFS.amount=OMFS.temp
        }
      }}
    return(OMFS.amount)
  }
}
####searching for OMFS in CSV files####
# Look up the fee-schedule price for a procedure code on a given date of
# service and scale it by the number of units billed.
#
# codedate: a Date (already parsed by the caller, see OMFS()).
# code:     5-character procedure code.
# units:    units billed, as a character or numeric scalar.
#
# Returns:
#  * dates 2000-01-01..2013-12-31: a CHARACTER price, taken from the blood
#    table, then the special post-2007-02-15 code table, then the 2003 RVU
#    schedule (same precedence as before);
#  * dates 2014-01-01..2019-12-31: a NUMERIC price from the matching
#    quarterly fee table (OMFSJan14 .. OMFSOct19);
#  * any other date: NULL (the caller then falls back to billed charges).
search.OMFS <- function(codedate, code, units) {
  n.units <- as.numeric(units)

  # Shared lookup: rows of `fee.table` whose `code.col` equals `code`; take
  # the price from `price.col` — the highest one when the code is listed more
  # than once, as the original per-quarter branches did — times units.
  # Returns numeric(0) when the code is absent from the table.
  lookup.price <- function(fee.table, code.col, price.col) {
    matched <- which(fee.table[[code.col]] == code)
    price <- as.numeric(as.character(fee.table[[price.col]][matched]))
    if (length(price) > 1) {
      price <- max(price)
    }
    price * n.units
  }

  # --- 2000-2013: pre fee-schedule reference tables -------------------------
  if (codedate >= as.Date("2000-01-01") && codedate <= as.Date("2013-12-31")) {
    if (code %in% blood$V1) {
      return(as.character(lookup.price(blood, "V1", "V2")))
    }
    # A handful of codes were repriced from 2007-02-15 onwards.
    if (codedate >= as.Date("2007-02-15") && code %in% weird2007codes$V1) {
      return(as.character(lookup.price(weird2007codes, "V1", "V2")))
    }
    return(as.character(lookup.price(OMFS2003, "V2", "V7")))
  }

  # --- 2014-2019: quarterly fee tables, named OMFS<Mon><yy> -----------------
  if (codedate >= as.Date("2014-01-01") && codedate <= as.Date("2019-12-31")) {
    quarter.month <- c("Jan", "Apr", "Jul", "Oct")[(as.integer(format(codedate, "%m")) - 1) %/% 3 + 1]
    fee.table <- get(paste0("OMFS", quarter.month, format(codedate, "%y")))
    return(lookup.price(fee.table, "HPCCodes", "OMFS"))
  }

  # Outside every supported span the original fell through and returned NULL.
  NULL
}
####searching for drug codes####
# Fee-schedule price for a 99070 drug code: strip everything up to the last
# dash to get the bare drug identifier, then look it up in the drugs table.
# Returns numeric(0) when the identifier is not in the table.
search.drugs <- function(code) {
  drug.id <- gsub(".*-", "", code)
  matched <- drugs %>% filter(Column2 == drug.id) %>% select(PRICE)
  as.numeric(as.character(matched[[1]]))
}
####payments####
# Overwrite computed OMFS amounts with payments where the bill row records a
# payment. Payments appear in column 7 as accounting-style negatives such as
# "($123.45)"; parse_number() reads the magnitude, so multiply by -1.
#
# bill:                 data frame of bill lines (column 7 = Charges).
# CalculatedOMFSColumn: vector of OMFS amounts, one per row of `bill`.
# Returns the updated vector.
paymentsmade<-function(bill,CalculatedOMFSColumn){
  # seq_len() (rather than 1:nrow(bill)) is safe when the bill has zero rows.
  for (i in seq_len(nrow(bill))){
    charge.text <- as.character(bill[i,7])
    if (startsWith(charge.text,"($")){
      CalculatedOMFSColumn[i]=parse_number(charge.text)*-1
    }
  }
  return(CalculatedOMFSColumn)
}
###################
####loadingbill####
# Interactive driver: pick a bill CSV, split the interleaved charge/payment
# columns into a single tidy frame, price each line, and write the result.
tempbill=file.choose(new=FALSE) #call in bill
bill=read.csv(tempbill,colClasses = "character") #read bill
smallerbill=bill[,1:15]
smallerbill<-as.data.frame(smallerbill)
# Columns 10-15 carry the payment fields, columns 1-9 the charge fields
# (per the variable names; payment amounts look like "($...)", see paymentsmade).
payments=(smallerbill[,10:15])
charges=(smallerbill[,1:9])
# Rename the payment columns so they line up with the charge columns, then pad
# the columns payments lack so rbind() can stack the two frames.
newnames<-c("X.1","X.2","X.3","X.4","X.6","X.7")
colnames(payments)<-newnames
payments$ID<-seq.int(nrow(payments))
payments$X.5<-c("")
payments$X.8<-c("")
payments$X<-c("")
charges$ID<-seq.int(nrow(charges))
newbill<-rbind(payments,charges)
# Restore the original row order so each payment sits next to its charge.
newbill<-arrange_at(newbill,"ID")
newbill1<-newbill[,order(colnames(newbill))]
newbill1$ID<-NULL
newbill1$X.8<-NULL
# NOTE(review): "Procedue" is likely a typo for "Procedure"; kept as-is since
# it names a column in the written output file.
colnamesbill<-c("DOS","DOE","Procedue","Modifier","Description","Unit","Charges","Total Charges")
colnames(newbill1)<-colnamesbill
# Drop empty lines, then price each remaining row via findcode()/OMFS().
newbill1<-newbill1 %>% filter(Charges!="")
OMFSColumn<-apply(newbill1,1,findcode) #generate OMFS column
# Payment rows override the computed amount with the (negative) payment.
updatedwithPayments<-paymentsmade(newbill1,OMFSColumn)
updatedwithPay<-vapply(updatedwithPayments,paste, collapse=",", character(1L))
newbill1$OMFS=updatedwithPay
write.csv(file="outputbill.csv",newbill1, na="")
|
3a683e3531ca71c1385684613e16e19651938107
|
750cacb8a12d2ef36c343072f979be48a01e3209
|
/rCode/rawCode/regexTrials.R
|
f0507cba1c5692a75a4f3dfdd47b9e57ddb7a2c6
|
[] |
no_license
|
mterion/capstoneProj
|
b192de37c7128677d6bfa5dbf8247f92bf1646f3
|
219f13700db7ced0b0a6331e9a59d217a1847e66
|
refs/heads/master
| 2023-03-02T09:37:19.990896
| 2021-01-28T06:47:51
| 2021-01-28T06:47:51
| 319,615,507
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,924
|
r
|
regexTrials.R
|
# Scratch trials: build per-domain regexes ("[^ ]+.com$" etc.) and remove
# domain-name tokens from a tokenized string with quanteda::tokens_remove().
domainNamesDf <- read.table("./data/rawData/domainName.txt", sep = "\n", quote = "", header = FALSE, colClasses = "character", encoding="UTF-8")
domainNamesDf <- domainNamesDf %>%
  rename(names = V1) %>%
  mutate(names = tolower(names)) %>%
  # NOTE(review): "." here is unescaped, so it matches any character;
  # presumably "\\." was intended — confirm before relying on these patterns.
  mutate(regEx = paste0("[^ ]+.", names,"$")) %>%
  filter(names %in% "com" | names %in% "org")
#head(domainNamesDf)
# [^ ]+.ing$   <- pattern note kept as a comment; a bare regex is not valid R
#                 and previously aborted parsing of this script.
str <- "This is domaine.org of a string, with alea.com included into it, and osteo.fr as well"
strTok <- tokens(str)
head(strTok)
# Remove tokens ending in ".com", then ".ing", then every pattern built above.
result <- tokens_remove(strTok, pattern = "[^ ]+.com$", valuetype = "regex", padding=TRUE )
head(result)
result <- tokens_remove(strTok, pattern = "[^ ]+(\\.ing)$", valuetype = "regex", padding=TRUE )
head(result)
result <- tokens_remove(strTok, pattern = domainNamesDf$regEx, valuetype = "regex", padding=TRUE )
head(result)
# [^ ]+.com$   <- pattern note, commented out for the same parsing reason.
domainNamesDf$regEx
#===========================
# Stop-word removal on quanteda tokens, then Twitter-slang scrubbing trials.
toksCAllSW <- tokens_remove(toksCAll6, pattern = unsigWordsDf$word, valuetype = "fixed", padding=TRUE ) # fixed for exact matching
rm(toksCAll6, unsigWords, unsigWordsDf)
# head(toksCAllSW, 3)
myStr <- "The U.S. final is #in a FB account during the W/E! What's that CC."
myStr <- tolower(myStr)
# Tokenize twice — once stripping punctuation/symbols/numbers/URLs, once
# keeping everything — to compare the resulting token streams.
toks <- tokens(myStr, remove_punct = TRUE, remove_symbols = TRUE, remove_numbers = TRUE, remove_url = TRUE, remove_separators = TRUE)
toks
toks <- tokens(myStr, remove_punct = FALSE, remove_symbols = FALSE, remove_numbers = FALSE, remove_url = FALSE, remove_separators = FALSE)
toks <- tokens_tolower(toks)
print(toks, max_ntoken= 100)
# Twitter/SMS abbreviations to scrub from the corpus.
twitterAbbr <- c(
  "CC","CX","CT","DM","HT","MT","PRT","RT","EM","EZine","FB","LI","SEO","SM","SMM","SMO","SN","SROI","UGC","YT","AB","ABT","AFAIK","AYFKMWTS",
  "B4","BFN","BGD","BH","BR","BTW","CD9","CHK","CUL8R","DAM","DD","DF","DP","DS","DYK","EM","EML","EMA","F2F","FTF","FB","FF","FFS","FM","FOTD",
  "FTW","FUBAR","FWIW","GMAFB","GTFOOH","GTS","HAGN","HAND","HOTD","HT","HTH","IC","ICYMI","IDK","IIRC","IMHO","IR","IWSN","JK","JSYK","JV","KK",
  "KYSO","LHH","LMAO","LMK","LO","LOL","MM","MIRL","MRJN","NBD","NCT","NFW","NJoy","NSFW","NTS","OH","OMFG","OOMF","ORLY","PLMK","PNP","QOTD","RE",
  "RLRT","RTFM","RTQ","SFW","SMDH","SMH","SNAFU","SO","SOB","SRS","STFU","STFW","TFTF","TFTT","TJ","TL","TLDR","TL;DR","TMB","TT","TY","TYIA","TYT",
  "TYVW","W","W/","W/E","WE","WTV","YGTR","YKWIM","YKYAT","YMMV","YOLO","YOYO","YW","ZOMG",
  "#BrandChat","#CMAD","#CMGR","#FB","#FF","#in","#LI","#LinkedInChat","#Mmchat","#Pinchat","#SMManners","#SMMeasure","#SMOchat","#SocialChat","#SocialMedia"
)
# Small hand-built alternation used as a sanity check further below.
tA <- "\\b(CC|FB|w/e|#cmad|w/)\\b"
# NOTE(review): this multi-line string literal embeds literal newlines inside
# the alternation, so alternatives at the start of a line (e.g. "b4") begin
# with "\n" and will not match as intended; consider building the pattern as
# paste(twitterAbbr, collapse = "|") instead — confirm before production use.
twitterAbbrRegEx <-tolower("\\b(
CC|CX|CT|DM|HT|MT|PRT|RT|EM|EZine|FB|LI|SEO|SM|SMM|SMO|SN|SROI|UGC|YT|AB|ABT|AFAIK|AYFKMWTS|
B4|BFN|BGD|BH|BR|BTW|CD9|CHK|CUL8R|DAM|DD|DF|DP|DS|DYK|EM|EML|EMA|F2F|FTF|FB|FF|FFS|FM|FOTD|
FTW|FUBAR|FWIW|GMAFB|GTFOOH|GTS|HAGN|HAND|HOTD|HT|HTH|IC|ICYMI|IDK|IIRC|IMHO|IR|IWSN|JK|JSYK|JV|KK|
KYSO|LHH|LMAO|LMK|LO|LOL|MM|MIRL|MRJN|NBD|NCT|NFW|NJoy|NSFW|NTS|OH|OMFG|OOMF|ORLY|PLMK|PNP|QOTD|RE|
RLRT|RTFM|RTQ|SFW|SMDH|SMH|SNAFU|SO|SOB|SRS|STFU|STFW|TFTF|TFTT|TJ|TL|TLDR|TL;DR|TMB|TT|TY|TYIA|TYT|
TYVW|W/E|W/|W|WE|WTV|YGTR|YKWIM|YKYAT|YMMV|YOLO|YOYO|YW|ZOMG|
#BrandChat|#CMAD|#CMGR|#FB|#FF|#in|#LI|#LinkedInChat|#Mmchat|#Pinchat|#SMManners|#SMMeasure|#SMOchat|#SocialChat|#SocialMedia
)\\b")
tA <- tolower(tA)
twittAbbrDf <- data.frame(twitterAbbr, stringsAsFactors = F) %>%
  rename(abbr = twitterAbbr) %>%
  mutate(abbr = tolower(abbr))
myStrDf <- data.frame(myStr, stringsAsFactors = F)
myStrDf
# Try scrubbing the sample string with the big alternation.
newStrDf <- myStrDf %>%
  mutate(myStr = stri_replace_all_regex(myStr, twitterAbbrRegEx, ''))
newStrDf
# Sanity check with the small hand-built alternation.
s <- "The U.S. final is #in a FB account during the W/E! What's that CC."
s <- tolower(s)
stri_replace_all_regex(s, "(fb|cc|w/|w/e|#cmad)", '_')
indivCharRemovalRegEx <- "[>|<|=|~|#|([0-9]+-[0-9]+)|%]" # clean characters and not word. Words, punct, emojis are done later at the level of tokenization
# Df
# NOTE(review): depends on blogsDf, defined elsewhere in the project.
myStrDF <- blogsDf %>%
  rename(text = as.character.blogsLines.) %>%
  mutate(text = as.character(text)) %>%
  #mutate(text = stri_replace_all_regex(text, '\"', ' ')) %>% # use stringi because much faster than gsub
  #mutate(text = stri_replace_all_regex(text, indivCharRemovalRegEx , " ")) %>% #need to replace with one space, if not will make a word out of 2 word when removing the unwanted char
  mutate(doc_id = "enBlogs") %>%
  mutate(type = "blogs")
|
a79a8a4164d2efde7abba6c169e8f8360c7f9bb7
|
5558e08bfe36159684f95a0782f92e30156f5c1d
|
/plot1.R
|
043927c0e5705e68950942fee00adfa1743228da
|
[] |
no_license
|
glenbert/Modul4Week1Assignment
|
2c879dea4a6b3563e34db65f3f8831f93cc4c6f1
|
b1da967cd9568be342f948adaf39827b1984d209
|
refs/heads/master
| 2021-01-25T13:18:01.498152
| 2018-03-02T08:05:11
| 2018-03-02T08:05:11
| 123,549,562
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 693
|
r
|
plot1.R
|
## Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
## Download the zip file and unzip; save the unzipped file in your working
## directory and rename the txt file to data.txt before running.
power <- read.table('./data.txt', header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".")

## Parse the Date column so the two target days can be selected.
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")

## Keep only the first two days of February 2007.
feb.days <- power[power$Date >= "2007-02-01" & power$Date <= "2007-02-02", ]

## The column was read as character; convert before plotting.
feb.days$Global_active_power <- as.numeric(feb.days$Global_active_power)

png("plot1.png", width = 480, height = 480)
hist(feb.days$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
ab9162d1c839b8447812629ee5cfd7929ef2ce98
|
5db3703c9817250f1a245107adac883f48b03b53
|
/bootstrap/had.27.46a20.R
|
198371f7b4d450d620cd00f89a0f18ea4d06f5c4
|
[] |
no_license
|
ices-taf/2020_4029-29_SpecialRequest
|
656ff69f76be57e67af0c17d96cb507fdc896d78
|
abedb79202600c6ddf7299988cdd8265d16e7d91
|
refs/heads/master
| 2022-12-23T22:49:11.188668
| 2020-10-09T07:32:07
| 2020-10-09T07:56:01
| 289,861,167
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 998
|
r
|
had.27.46a20.R
|
#' Data from had.27.46a20
#'
#' @name had.27.46a20
#' @format csv file
#' @tafOriginator ICES, WGCSE
#' @tafYear 2020
#' @tafAccess Public
#' @tafSource script
library(icesTAF)
taf.library(icesSharePoint)
# Fetch the F-at-age table for this stock from the ICES SharePoint site.
spgetfile(
  "Documents/Preliminary documents/bootstrap_initial_data/had.27.46a20/f-at-age.csv",
  "/admin/Requests",
  "https://community.ices.dk",
  destdir = "."
)
# read lowestoft file
fdata <- read.taf("f-at-age.csv")
years <- fdata$Year
# Age classes are encoded in the column names (all columns except Year).
ages <- as.numeric(colnames(fdata)[-1])
# Reshape wide (year x age) to long (year, age, harvest); unlist() walks the
# age columns one after another, matching rep(ages, each = length(years)).
data <-
  data.frame(
    year = rep(years, length(ages)),
    age = rep(ages, each = length(years)),
    harvest = unname(unlist(fdata[, -1]))
  )
data$stock_code <- "had.27.46a20"
data$assessment_year <- 2020
write.taf(data)
cat(
"NOTE:
* Estimates refer to the full year (January–December) except for age 0, for which the mortality rate given refers to the second half-year only (July–December).
* The 2020 estimates are TSA forecasts
",
file = "README.md"
)
# clean up
unlink("f-at-age.csv")
|
7ead8a185725055285f31ac3604c955cc1ae1b46
|
9b835eb60ed6c453ad224a95b6c2e1776d164194
|
/plot4.R
|
cbb542ecd78dfddc86465369592a96910e17e3ed
|
[] |
no_license
|
Wondamike7/ExData_Plotting1
|
c30d8dfd4142634ebd6857de797351b47188ef36
|
453183de8907cf4ac942fe180cae1badb8ba30a9
|
refs/heads/master
| 2020-12-07T00:47:00.299143
| 2015-04-09T07:55:22
| 2015-04-09T07:55:22
| 33,649,271
| 0
| 0
| null | 2015-04-09T04:58:28
| 2015-04-09T04:58:28
| null |
UTF-8
|
R
| false
| false
| 1,697
|
r
|
plot4.R
|
## plot4.R — builds the 2x2 panel figure (plot4.png) from the household
## power-consumption data prepared by LoadData.R.

## If LoadData.R is not in the current directory, switch to the repo folder.
if (!"LoadData.R" %in% list.files()) {
  setwd("C:/Users/fieldrep/Documents/Coursera/Repositories/ExData_Plotting1/")
}

## Source the loader; note: this will error out if the data set (or the
## source zip file) is not in the working directory. It provides dat_sub.
source("LoadData.R")

## Render straight to a 480x480 PNG with a white background.
png(filename = "plot4.png",
    height = 480, width = 480,
    units = "px", bg = "white")

## Four panels, filled left to right across the top row, then the bottom row.
par(mfrow = c(2, 2))

## Panel 1 (as plot2, but the y label no longer shows "(kilowatts)").
with(dat_sub, plot(DateTime, Global_active_power,
                   type = "l", xlab = "", ylab = "Global Active Power"))

## Panel 2: voltage over time.
with(dat_sub, plot(DateTime, Voltage, type = "l", xlab = "datetime", ylab = "Voltage"))

## Panel 3 (as plot3, but with a borderless, slightly smaller legend).
with(dat_sub, {
  plot(DateTime, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
  lines(DateTime, Sub_metering_2, col = "red")
  lines(DateTime, Sub_metering_3, col = "blue")
})
legend(legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       x = "topright", lwd = 1, cex = 0.9,
       col = c("black", "red", "blue"), bty = "n")

## Panel 4: global reactive power over time.
with(dat_sub, plot(DateTime, Global_reactive_power,
                   type = "l", xlab = "datetime", ylab = "Global_reactive_power"))

## Close the png device.
dev.off()
|
695537cfd26fa6406d514a044a0e607bdf62a44c
|
934f82c4a6f7b3364c97834bab6a7514bca79853
|
/man/automateDataPreparation.Rd
|
462061326d2cf6035a5b3dbd04378dcbc62f7287
|
[] |
no_license
|
sachseka/eatPrep
|
164c3b007355b6f366d2bc9ee9fbcb300d44ff57
|
7f3cfcad9ac0479dafed0ddd9ba2ad3db50238ea
|
refs/heads/master
| 2023-09-05T17:54:15.570238
| 2023-02-21T11:11:38
| 2023-02-21T11:11:38
| 151,248,346
| 0
| 1
| null | 2023-08-23T18:02:00
| 2018-10-02T12:09:19
|
R
|
UTF-8
|
R
| false
| false
| 5,756
|
rd
|
automateDataPreparation.Rd
|
\name{automateDataPreparation}
\alias{automateDataPreparation}
\title{Automate Data Preparation using Functions from Package eatPrep}
\description{
This function facilitates automated data preparation and wraps most functions from the \code{eatPrep} package.
}
\usage{automateDataPreparation(datList = NULL, inputList, path = NULL,
readSpss, checkData, mergeData, recodeData, recodeMnr = FALSE,
aggregateData, scoreData, writeSpss, collapseMissings = FALSE,
filedat = "mydata.txt", filesps = "readmydata.sps", breaks=NULL,
nMbi = 2, rotation.id = NULL, suppressErr = FALSE, recodeErr = "mci",
aggregatemissings = NULL, rename = TRUE, recodedData = TRUE,
addLeadingZeros=FALSE, truncateSpaceChar = TRUE, newID = NULL, oldIDs = NULL,
missing.rule = list(mvi = 0, mnr = 0, mci = NA, mbd = NA, mir = 0, mbi = 0),
verbose=FALSE)}
\arguments{
\item{datList}{
A list of data frames (see \code{data(\link{inputDat})}). If \code{NULL}, \code{readSPSS} has to be \code{TRUE}. In this case, the function attempts to read SPSS .sav files.
}
\item{inputList}{
A list of data frames containing neccessary information for data preparaton (see \code{data(inputList)} for details).
}
\item{path}{
A character vector containing the path required by for \code{\link{writeSpss}}.
Default is the current \R working directory.
}
\item{readSpss}{
Logical: If \code{TRUE}, the function \code{\link{readSpss}} will be called.
}
\item{checkData}{
Logical: If \code{TRUE}, the function \code{\link{checkData}} will be called.
}
\item{mergeData}{
Logical: If \code{TRUE}, the function \code{\link{mergeData}} will be called.
}
\item{recodeData}{
Logical: If \code{TRUE}, the function \code{\link{recodeData}} will be called.
}
\item{recodeMnr}{
Logical: If \code{TRUE}, the function \code{\link{mnrCoding}} will be called.
}
\item{aggregateData}{
Logical: If \code{TRUE}, the function \code{\link{aggregateData}} will be called.
}
\item{scoreData}{
Logical: If \code{TRUE}, the function \code{\link{scoreData}} will be called.
}
\item{collapseMissings}{
Logical: If \code{TRUE}, the function \code{\link{collapseMissings}} will be called and a data frame with recoded missing values according to argument \code{missing.rule} will be returned.
}
\item{writeSpss}{
Logical: If \code{TRUE}, the function \code{\link{writeSpss}} will be called.
}
\item{filedat}{
a character string containing the name of the output data file for \code{\link{writeSpss}}.
}
\item{filesps}{
a character string containing the name of the output syntax file for \code{\link{writeSpss}}.
}
\item{breaks}{
Numeric vector passed on to function \code{\link{mnrCoding}} containing the number of blocks after which \code{mbi} shall be recoded to \code{mnr}, e.g., \code{c(1,2)} to specify breaks after the first and second block.
}
\item{nMbi}{
Numeric vector of length 1 passed on to function \code{\link{mnrCoding}} containing the number of \code{mbi}-Codes required at the end of a block to code \code{mnr}. Needs to be > 0.
}
\item{rotation.id}{
Character vector of length 1 passed on to function \code{\link{mnrCoding}} indicating the name of the rotation indicator (e.g. \dQuote{booklet}) in the dataset.
}
\item{suppressErr}{
Logical passed on to function \code{\link{aggregateData}} indicating whether aggregated cells with \code{err} should be recoded to another value.
}
\item{recodeErr}{Character vector of length 1 passed on to function \code{\link{aggregateData}} indicating the value to which \code{err} should be recoded. This argument is only evaluated when \code{suppressErr = TRUE}.}
\item{missing.rule}{
A named list with definitions of how to recode the different types of missings in the dataset. If \code{writeSPSS = TRUE}, missing values will be recoded to 0 or \code{NA} prior to writing the SPSS dataset. See \code{\link{collapseMissings}} for supported missing values.}
\item{aggregatemissings}{
A symmetrical \emph{n x n} matrix or a data frame from \code{inputList$aggrMiss} passed on to function \code{\link{aggregateData}} with information on how missing values should be aggregated. If no matrix is given, the default will be used. See 'Details' in \code{\link{aggregateData}}.
}
\item{rename}{Logical passed on to function \code{\link{aggregateData}} indicating whether units with only one subunit should be renamed to their unit name. Default is \code{TRUE}, as shown in the usage above.}
\item{recodedData}{Logical passed on to function \code{\link{aggregateData}}indicating whether colnames in \code{dat} are the subunit names (as in \code{subunits$subunit}) or recoded subunit names (as in \code{subunits$subunitRecoded}). Default is \code{TRUE}, meaning that colnames are recoded subitem names.}
\item{addLeadingZeros}{
logical. See \code{\link{readSpss}}.
}
\item{truncateSpaceChar}{
logical. See \code{\link{readSpss}}.
}
\item{newID}{
A character string containing the case IDs name in the final data frame. Default
is \code{ID} or a character string specified in \code{inputList$newID}.
}
\item{oldIDs}{
A vector of character strings containing the IDs names in the original SPSS datasets.
Default is as specified in \code{inputList$savFiles}.
}
\item{verbose}{
Logical: If \code{TRUE}, progress and additional information is printed.
}
}
\value{
A data frame resulting from the final data preparation step.
}
\author{
Karoline Sachse
}
\examples{
data(inputList)
data(inputDat)
preparedData <- automateDataPreparation(inputList = inputList,
datList = inputDat, path = getwd(),
readSpss = FALSE, checkData = TRUE, mergeData = TRUE,
recodeData = TRUE, recodeMnr = TRUE, breaks = c(1,2),
aggregateData = TRUE, scoreData = TRUE,
writeSpss = FALSE, verbose = TRUE)
}
|
27db6e0f0b13ef57a8ea7587dbf32101f68b8994
|
a70ed9684eeb7a6b50dbba62e7979e7ab865b969
|
/expert_mendel_sheridan.R
|
796f968197797fd2ba5ed2e32df442222f2950c9
|
[] |
no_license
|
miller00315/estudos_r
|
b95ecbcd297c6b1e6cd9d30f659df002994de8f1
|
3ade710540393b6d1e5116191a45efc4a4440e51
|
refs/heads/master
| 2022-12-18T16:12:16.944044
| 2020-09-26T15:20:54
| 2020-09-26T15:20:54
| 297,691,598
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 992
|
r
|
expert_mendel_sheridan.R
|
# Mendel-Sheridan aggregation of three experts' judgements with the `expert`
# package, followed by diagnostic plots of the aggregated distribution.
# NOTE(review): install.packages() runs on every execution; normally this is
# done once, outside the script.
install.packages("expert")
library(expert)
# Each expert supplies three quantiles for two seed variables (SEM1, SEM2,
# whose true values are given below) and for the variable of interest (INT).
# NOTE(review): `<-` inside list() also assigns EXP1/SEM1/SEM2/INT as globals,
# each overwritten by the next expert; presumably plain positional arguments
# were intended — confirm.
x <-list(
  EXP1 <-list(
    SEM1 <- c(75, 80, 85),
    SEM2 <- c(10,15,20),
    INT <- c(650, 800,850)
  ),
  EXP2 <-list(
    SEM1 <- c(80, 90, 95),
    SEM2 <- c(25,30,35),
    INT <- c(500, 600,700)
  ),
  EXP3 <-list(
    SEM1 <- c(65, 70, 80),
    SEM2 <- c(20,25,30),
    INT <- c(450, 650,800)
  )
)
# Probability levels of the elicited quantiles and the seeds' true values.
prob <- c(0.1, 0.5, 0.9)
semverd <- c(80, 25)
# "ms" selects the Mendel-Sheridan aggregation method.
inf <- expert(x, "ms", prob, semverd)
inf
hist(inf, col= "blue")
par(bg = "white")
# 2x2 screen layout: aggregated distribution plus each expert's INT density.
split.screen(c(2,2))
screen(1)
hist(inf, col = "gray", main = "Distribuição agregada")
screen(2)
s = density(c(650, 800, 850))
plot(s, main ="Especialista 1")
polygon(s, col = "blue")
screen(3)
s = density(c(500, 600, 700))
plot(s, main ="Especialista 2")
polygon(s, col = "blue")
screen(4)
s = density(c(450, 650, 800))
plot(s, main ="Especialista 3")
polygon(s, col = "blue")
close.screen(all.screens = TRUE)
# Summary statistics and distribution functions of the aggregated expert.
summary(inf)
quantile(inf)
mean(inf)
dc = cdf(inf)
plot(dc)
og = ogive(inf)
plot(og)
|
61bbaec3c75cfd57e9ca024cdb223bdd5762c548
|
552d495af4b801425da2b2b82df006ea4819184a
|
/phoneme/phoneme.R
|
2c1bc55e15435f9ccc445e28970e85acfc28c3d7
|
[] |
no_license
|
nihaoHX/Logistic_subsampling
|
c85fe986bfcd263b9e82bf55a607b1eb2acb3af8
|
52f024678d54321151d0eddf1df3f1f8bda768cb
|
refs/heads/main
| 2023-02-07T12:09:08.348456
| 2021-01-04T16:16:18
| 2021-01-04T16:16:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,214
|
r
|
phoneme.R
|
################ Real data: phoneme ################
library(nabor)
library(mined)
library(mvtnorm)
library(OSMAC)
library(ggplot2)
library(foreach)
library(doParallel)
library(readr)
library(splines)
source("more_efficient")
#source("MED")
source("IBOSSL")
source("Leverage")
source("DKDO")
### data treatment
# Read the phoneme log-periodogram data (256 frequency columns per utterance
# plus the spoken phoneme label in the `speaker` column).
phoneme=read_csv("Logistic/Main/phoneme/phoneme.data")
Y=phoneme$speaker
# Drop columns 1 and 258, leaving the 256 frequency columns
# (presumably a row id and the speaker label — confirm against the file).
X=phoneme[,-c(1,258)]
# Row indices of the four phoneme classes kept for this analysis.
idaa=which(Y=="aa")
iddcl=which(Y=="dcl")
idsh=which(Y=="sh")
idiy=which(Y=="iy")
#idao=which(Y=="ao")
DataX=X[c(idaa,iddcl,idsh,idiy),]
DataY=Y[c(idaa,iddcl,idsh,idiy)]
# Ten example utterances for the illustrative plot; class labels per row are
# given in the factor below (aa, aa, dcl, dcl, iy, iy, iy, sh, sh, sh).
idi=c(1:2,696,697,2325:2327,1453:1455)
# Long format: c(as.matrix(...)) unrolls column-major, i.e. the 10 utterances
# for frequency 1 first — matching rep(1:256, each = 10).
Dataidi=data.frame(freq=rep(c(1:256),each=10),
                   logp=c(as.matrix(DataX[idi,])),
                   class=factor(rep(c("aa","aa","dcl","dcl","iy","iy","iy","sh","sh","sh"),times=256)))
Pidi=ggplot(Dataidi,aes(x=freq,y=logp,colour=class))+
  geom_line(aes(linetype=class))+
  scale_linetype_manual(values=c(1,2,1,2))+
  scale_color_manual(values=c("#4DAF4A","#4DAF4A","#E41A1C","#E41A1C"))+
  xlab("Frequency")+ylab("log-periodogram")+theme_bw()+
  theme(axis.text.y = element_text(angle=90),
        legend.justification=c(1,1),
        legend.position = c(1,1),
        legend.background = element_rect(colour = "black"))
Pidi
# Compress the 256 frequencies with a natural cubic spline basis (8 df) and
# binarize the response: 0 = {aa, dcl}, 1 = {sh, iy}.
H=ns(1:256,df=8)
HX=t(t(H)%*%t(DataX))
HY=c(rep(0,length(idaa)+length(iddcl)),rep(1,length(idsh)+length(idiy)))
# Shuffle rows, scale each column to unit SD, and fit a full logistic model.
id=sample(1:nrow(HX),nrow(HX),replace = FALSE)
Xtrain=HX[id,]
Xtrain=t(t(Xtrain) / apply(Xtrain, 2, sd))
Ytrain=HY[id]
Dtrain=as.data.frame(cbind(Xtrain,Ytrain))
a=glm(Ytrain~.,family = binomial,data=Dtrain)
summary(a)
# Basis column 6 is not significant, so drop it and refit.
Xtrain=Xtrain[,-6]
Dtrain=as.data.frame(cbind(Xtrain,Ytrain))
a=glm(Ytrain~.,family = binomial,data=Dtrain)
summary(a)
###### comparison
subseq=c(300,500,700,1000,1200,1500)
Main=function(Xtrain,Ytrain,subseq,seed){
set.seed(seed)
it=sample(1:nrow(Xtrain),2500)
X=Xtrain[it,]
Y=Ytrain[it]
Xt=Xtrain[-it,]
Yt=Ytrain[-it]
### full data
t1=proc.time()
full=getMLE(cbind(1,X),Y,1)$par
tfull=(proc.time()-t1)[[3]]
pfull=1/(1+exp(-cbind(1,Xt)%*%full))
yfull=round(pfull)
errfull=mean((Yt-yfull)^2)
### OSMAC
Osm=function(sub){
t1=proc.time()
r=twostep(cbind(1,X),Y,200,sub-200,"mmse")$par
t=(proc.time()-t1)[[3]]
return(list(res=r,time=t))}
osm=sapply(subseq,Osm,simplify = FALSE)
errosm=tosm=rep(NA,6)
parosm=matrix(NA,6,8)
for(i in 1:6){
if(is.null(osm[[i]]$res[1])){
}
else{
parosm[i,]=osm[[i]]$res
posm=1/(1+exp(-cbind(1,Xt)%*%osm[[i]]$res))
yosm=round(posm)
errosm[i]=mean((Yt-yosm)^2)
tosm[i]=osm[[i]]$time
}
}
### MORE
Mor=function(sub){
t1=proc.time()
r=ME(cbind(1,X),Y,200,sub-200)$par
t=(proc.time()-t1)[[3]]
return(list(res=r,time=t))}
mor=sapply(subseq,Mor,simplify = FALSE)
errmor=tmor=rep(NA,6)
parmor=matrix(NA,6,8)
for(i in 1:6){
if(is.na(mor[[i]]$res[1])){
}
else{
parmor[i,]=mor[[i]]$res
pmor=1/(1+exp(-cbind(1,Xt)%*%mor[[i]]$res))
ymor=round(pmor)
errmor[i]=mean((Yt-ymor)^2)
tmor[i]=mor[[i]]$time
}
}
### IBOSS
Ibo=function(sub){
t1=proc.time()
r=IBOSSL(X,Y,200,sub-200,2.5,"Yes")$par
t=(proc.time()-t1)[[3]]
return(list(res=r,time=t))}
ibo=sapply(subseq,Ibo,simplify = FALSE)
erribo=tibo=rep(NA,6)
paribo=matrix(NA,6,8)
for(i in 1:6){
if(is.na(ibo[[i]]$res[1])){
}
else{
paribo[i,]=ibo[[i]]$res
pibo=1/(1+exp(-cbind(1,Xt)%*%ibo[[i]]$res))
yibo=round(pibo)
erribo[i]=mean((Yt-yibo)^2)
tibo[i]=ibo[[i]]$time
}
}
### Leverage: Pilot
Lev=function(sub){
t1=proc.time()
r=LevDeter_Pilot(cbind(1,X),Y,200,sub-200)$par
t=(proc.time()-t1)[[3]]
return(list(res=r,time=t))}
lev=sapply(subseq,Lev,simplify = FALSE)
errlev=tlev=rep(NA,6)
parlev=matrix(NA,6,8)
for(i in 1:6){
if(is.na(lev[[i]]$res[1])){
}
else{
parlev[i,]=lev[[i]]$res
plev=1/(1+exp(-cbind(1,Xt)%*%lev[[i]]$res))
ylev=round(plev)
errlev[i]=mean((Yt-ylev)^2)
tlev[i]=lev[[i]]$time
}
}
### DKDO: pilot
Dkd=function(sub){
t1=proc.time()
r=DKDO_pilot(cbind(1,X),Y,200,sub-200)$par
t=(proc.time()-t1)[[3]]
return(list(res=r,time=t))}
dkd=sapply(subseq,Dkd,simplify = FALSE)
errdkd=tdkd=rep(NA,6)
pardkd=matrix(NA,6,8)
for(i in 1:6){
if(is.na(dkd[[i]]$res[1])){
}
else{
pardkd[i,]=dkd[[i]]$res
pdkd=1/(1+exp(-cbind(1,Xt)%*%dkd[[i]]$res))
ydkd=round(pdkd)
errdkd[i]=mean((Yt-ydkd)^2)
tdkd[i]=dkd[[i]]$time
}
}
return(list(Fullpar=full,Fullerr=errfull,Fullt=tfull,Osmpar=parosm,Osmerr=errosm,Osmt=tosm,
Morpar=parmor,Morerr=errmor,Mort=tmor,Ibopar=paribo,Iboerr=erribo,Ibot=tibo,
Levpar=parlev,Leverr=errlev,Levt=tlev,Dkdpar=pardkd,Dkderr=errdkd,Dkdt=tdkd))
}
cl<- makeCluster(15)
registerDoParallel(cl)
Resultpm=foreach(i=1:1000,
.combine=cbind,
.packages=c("mvtnorm","nabor","mined")) %dopar% Main(Xtrain,Ytrain,subseq,666*i+18)
stopCluster(cl)
fullpar=matrix(0,1000,8)
fullerr=fullt=rep(NULL,1000)
for(i in 1:1000){
fullpar[i,]=Resultpm[,i]$Fullpar
fullerr[i]=Resultpm[,i]$Fullerr
fullt[i]=Resultpm[,i]$Fullt
}
Fullpar=apply(fullpar,2,mean)
#[1] -12.601923 16.442459 -3.021905 4.867152 -3.910880 2.733905 -9.483273 6.170050
Fullerr=mean(fullerr)
#[1] 0.02580547
osmpar=osmerr=osmt=morpar=morerr=osmt=ibopar=iboerr=ibot=matrix(NA,1000,6)
levpar=leverr=levt=dkdpar=dkderr=dkdpar=matrix(NA,1000,6)
for(i in 1:1000){
t=Resultpm[,i]
osmerr[i,]=t$Osmerr
morerr[i,]=t$Morerr
iboerr[i,]=t$Iboerr
leverr[i,]=t$Leverr
dkderr[i,]=t$Dkderr
osmt[i,]=t$Osmt
mort[i,]=t$Mort
ibot[i,]=t$Ibot
levt[i,]=t$Levt
dkdt[i,]=t$Dkdt
for(j in 1:6){
if(is.na(t$Osmt[j])){
}
else{
osmpar[i,j]=sum((t$Osmpar[j,]-Fullpar)^2)
}
if(is.na(t$Mort[j])){
}
else{
morpar[i,j]=sum((t$Morpar[j,]-Fullpar)^2)
}
if(is.na(t$Ibot[j])){
}
else{
ibopar[i,j]=sum((t$Ibopar[j,]-Fullpar)^2)
}
if(is.na(t$Levt[j])){
}
else{
levpar[i,j]=sum((t$Levpar[j,]-Fullpar)^2)
}
if(is.na(t$Dkdt[j])){
}
else{
dkdpar[i,j]=sum((t$Dkdpar[j,]-Fullpar)^2)
}
}
}
mean1=function(x){return(mean(x,na.rm=TRUE))}
sd1=function(x){return(sd(x,na.rm=TRUE))}
data_pm=data.frame(nNA=c(apply(apply(osmpar,2,is.na),2,mean),apply(apply(morpar,2,is.na),2,mean),apply(apply(ibopar,2,is.na),2,mean),
apply(apply(levpar,2,is.na),2,mean),apply(apply(dkdpar,2,is.na),2,mean),rep(0,6)),
parmse=c(apply(osmpar,2,mean1),apply(morpar,2,mean1),apply(ibopar,2,mean1),
apply(levpar,2,mean1),apply(dkdpar,2,mean1),rep(mean(apply((fullpar-rep(1,1000)%*%t(Fullpar))^2,1,sum)),6)),
parsd=c(apply(osmpar,2,sd1),apply(morpar,2,sd1),apply(ibopar,2,sd1),
apply(levpar,2,sd1),apply(dkdpar,2,sd1),rep(sd(apply((fullpar-rep(1,1000)%*%t(Fullpar))^2,1,sum)),6)),
errmean=c(apply(osmerr,2,mean1),apply(morerr,2,mean1),apply(iboerr,2,mean1),
apply(leverr,2,mean1),apply(dkderr,2,mean1),rep(Fullerr,6)),
errsd=c(apply(osmerr,2,sd1),apply(morerr,2,sd1),apply(iboerr,2,sd1),
apply(leverr,2,sd1),apply(dkderr,2,sd1),rep(sd(fullerr),6)),
tmean=c(apply(osmt,2,mean1),apply(mort,2,mean1),apply(ibot,2,mean1),
apply(levt,2,mean1),apply(dkdt,2,mean1),rep(Fullt,6)),
Method=factor(rep(c("OSMAC","MORE","IBOSS","LEV","DKDO","FULL"),each=6)),
k=rep(c(300,500,700,1000,1200,1500),times=6))
write.csv(data_pm,"Logistic/Main/phoneme/data_pm.csv")
idd=c(7:18)
p_pmpar=ggplot(data_pm[-idd,],aes(x=k,y=log(parmse),group=Method,colour=Method))+
theme_bw()+theme(axis.text.y = element_text(angle=90),
legend.justification=c(1,1),
legend.position = c(1,1),
legend.background = element_rect(colour = "black"))+
geom_line(aes(linetype=Method))+
geom_point(aes(shape=Method),size=2)+scale_shape_manual(values=c(1,2,4,6))+
scale_linetype_manual(values=c(1,1,1,1))+
scale_color_manual(values=c("#4DAF4A","orange","#E41A1C","#377EB8"))+
xlab("k")+ylab("log(MSE)")
p_pmpar
p_pmerr=ggplot(data_pm,aes(x=k,y=errmean,colour=Method))+
theme_bw()+theme(axis.text.y = element_text(angle=90),
legend.justification=c(1,1),
legend.position = c(1,1),
legend.background = element_rect(colour = "black"))+
geom_line(aes(linetype=Method))+
geom_point(aes(shape=Method),size=2)+scale_shape_manual(values=c(1,2,3,4,5,6))+
scale_linetype_manual(values=c(1,1,1,1,1,1))+
scale_color_manual(values=c("#4DAF4A","orange","#A65628","#E41A1C","#984EA3","#377EB8"))+
xlab("k")+ylab("PER")
p_pmerr
p_pmpar=ggplot(data_pm[-idd,],aes(x=k,y=parmse,group=Method,colour=Method))+
theme_bw()+theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_rect(colour = "black"),
axis.text=element_text(size=12),
axis.text.y = element_text(angle=90,size=12),
axis.title=element_text(size=14),
legend.justification=c(1,1),
legend.position = c(1,1),
legend.title = element_blank(),
legend.background = element_rect(colour = "black"),
legend.key.width=unit(2,"line"),
legend.key.height=unit(1,"line"))+
geom_line(aes(linetype=Method), position=pd)+
geom_point(position=pd,size=1)+scale_shape_manual(values=c(1,1,1,1,1,1,1,1,1,1))+
scale_linetype_manual(values=c(1,2,3,1,1,1,2,3,1,1))+
scale_size_manual(values=c(1,1,1,1,1,1,1,1,1,1))+
scale_color_manual(values=c("#4DAF4A","#4DAF4A","#4DAF4A","black","#A65628","#E41A1C","#E41A1C","#E41A1C","#377EB8","#984EA3"))+
xlab("k")+ylab("MSE")
p_pmpar
p_pmerr=ggplot(data_pm,aes(x=k,y=errmean,colour=Method))+theme_bw()+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_rect(colour = "black"),
axis.text=element_text(size=12),
axis.text.y = element_text(angle=90,size=12),
axis.title=element_text(size=14),
legend.justification=c(1,1),
legend.position = c(1,1),
legend.title = element_blank(),
legend.background = element_rect(colour = "black"),
legend.key.width=unit(2,"line"),
legend.key.height=unit(1,"line"))+
geom_line(aes(linetype=Method), position=pd)+
geom_point(position=pd,size=1)+scale_shape_manual(values=c(1,1,1,1,1,1,1,1,1,1))+
scale_linetype_manual(values=c(1,2,3,1,1,1,2,3,1,1))+
scale_size_manual(values=c(1,1,1,1,1,1,1,1,1,1))+
scale_color_manual(values=c("#4DAF4A","#4DAF4A","#4DAF4A","black","#A65628","#E41A1C","#E41A1C","#E41A1C","#377EB8","#984EA3"))+
xlab("k")+ylab("MSE")
p_pmerr
|
4faa81a0354f24949f9657c3f065fc77dd2c4b33
|
44cf65e7ab4c487535d8ba91086b66b0b9523af6
|
/data/Newspapers/1999.12.06.editorial.19652.0206.r
|
c2f9b9c1b800cf33e3d1166faa55306ab7a582e0
|
[] |
no_license
|
narcis96/decrypting-alpha
|
f14a746ca47088ec3182d610bfb68d0d4d3b504e
|
5c665107017922d0f74106c13d097bfca0516e66
|
refs/heads/master
| 2021-08-22T07:27:31.764027
| 2017-11-29T12:00:20
| 2017-11-29T12:00:20
| 111,142,761
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,245
|
r
|
1999.12.06.editorial.19652.0206.r
|
Atrocitatile si agresiunile sexuale de la Suceava au socat Romania .
oamenii sint consternati .
de citeva decenii , romanii n - au mai fost pusi in fata unui asemenea caz .
de la psihozele cu Rimaru ( in Bucuresti ) si cu ciocanarul ( la Cluj ) , opinia publica n - a mai trait sentimente atit de puternice .
cu ceva timp in urma , Belgia a trecut si ea printr - o situatie asemanatoare , care a tinut prima pagina a ziarelor vreme de un an .
pe linga patetismul reactiilor generate de oroarea faptelor , au inceput sa apara si excesele .
unii s - au grabit sa ceara reintroducerea pedepsei cu moartea sau chiar linsarea vinovatului .
pericolul e sa ne lasam dusi cu totii in isterie . Pe zi ce trece sa descoperim si mai multe victime si sa ne inflamam , pierzindu - ne masura si echilibrul .
de regula , astfel de intimplari creeaza o presiune sociala teribila , iar reactiile sint numai pe termen scurt .
violentele de la Suceava sint rezultatul unui accident de mari proportii .
ne aflam , fara indoiala , in fata unui bolnav psihic .
descoperirea si prinderea lui devin o chestiune vitala si pentru comunitate , si pentru autoritati .
dar gravitatea celor petrecute nu ne poate impiedica sa vedem esentialul .
abuzurile sexuale ( si nu doar acestea ) sint o problema ignorata . Si de autoritati , si de familie , si de societatea civila .
coplesita de numarul mare de infractiuni , politia nu mai stie cu ce sa inceapa .
cu omorurile , cu criminalitatea economico - financiara sau cu cea din rindul minorilor ?
pe linga toate acestea , identificarea abuzurilor sexuale se mai impiedica si de pudoarea romanilor de a vorbi despre sex .
si astfel am ajuns in situatia de a nu sti care sint proportiile reale ale fenomenului .
faptul ca mediile straine incep sa vorbeasca despre Romania ca despre un paradis al pedofililor e un semn .
la fel , mentionarea copiilor institutionalizati in rapoartele Uniunii Europene ne obliga sa recunoastem ca nu mai sintem doar in fata unui accident revoltator , ci ca el a fost posibil pe fondul unor multiple cauze .
romanii s - au obisnuit sa - i vada pe copiii strazii , fara sa se intrebe ce se intimpla cu ei .
in schimb , sint socati cind afla ca cei mai multi sint activi din punct de vedere sexual ( si este vorba de copii intre 7 si 12 ani ) .
romanii s - au obisnuit , de pe vremea lui nea Nicu , sa nu poarte prea multa grija copiilor din familie .
multi dintre ei cresc cu cheia de git si nu - i intreaba nimeni ce li se intimpla .
Pudoarea de a discuta despre sex ii determina pe multi sa ingroape in tacere asemenea abuzuri .
or , toate acestea nu se inlatura nici prin introducerea pedepsei cu moartea , nici printr - o indignare demagogica .
situatii dramatice , de genul celei de la Suceava , vor mai aparea , poate , peste ani , la noi sau aiurea .
bolnavi mintal care sa nu fie depistati la prima infractiune au fost si vor mai fi si in tarile civilizate .
mult mai util ar fi sa indepartam zgura si detaliile din aceasta situatie nenorocita pentru a ajunge la cauzele adinci ale fenomenului . Iar el trebuie privit din perspectiva masurilor preventive .
e greu sa construim un mecanism care sa impiedice aparitia unui asemenea nebun , dar e mult mai necesar sa redescoperim cauza neglijata a copiilor .
sa ne implicam in protectia lor , in preluarea de catre autoritatile statului si de catre societatea civila a fenomenului numit " copiii strazii " .
pe multi , drama celor patru copii de la Suceava ( s - ar putea sa fie vorba de mai multi ) i - a determinat sa priveasca altfel educatia si ingrijirea propriilor odrasle .
tot drama de la Suceava ne - a reamintit ce se petrece cu acest fenomen ignorat de atitia ani .
Captivati de politica si de grijile de fiecare zi , am trecut in plan secund abuzurile asupra minorilor , violentele la care sint expusi .
daca aceasta drama va fi redusa doar la o disputa legata de pedeapsa ce trebuie aplicata vinovatului , inseamna ca n - am facut nimic .
daca insa , in acest moment , vom aborda subiectul temeinic , cu participarea tuturor , atunci am putea spera ca , intr - o buna zi , numarul copiilor expusi violentelor va fi mult mai mic .
abia atunci vom putea spune ca am facut ceea ce trebuia facut .
|
9e6f6427deb23ad68a459b0ed9f3b34975e1fba2
|
e507b9f3094ff40cfc4359d2647a9dcdeb3bf712
|
/src/00_01_date processing.R
|
8e39624860ae554c126540f1d3bc3a66796f95bd
|
[] |
no_license
|
DomHenry/sdm-pipeline
|
2bb8ef25f730f4fb51bbbc5435356989b54c63a9
|
0049728a68053d156c52753ce58decabdd48f192
|
refs/heads/master
| 2020-06-12T14:03:31.577533
| 2019-08-08T10:43:01
| 2019-08-08T10:43:01
| 194,322,619
| 0
| 0
| null | 2019-07-18T09:27:03
| 2019-06-28T19:12:35
|
R
|
UTF-8
|
R
| false
| false
| 7,849
|
r
|
00_01_date processing.R
|
# Description -------------------------------------------------------------
## Thu Jan 10 10:47:30 2019
## Import the original amphibian database, subset to include only RL, NT and DD
## species. Remove duplicate records then work on dates. Create 4 DFs with
## different date combinations (full date, month & year, year, no valid date).
## Write workspace with DF that has a column with full date and NA for any
## other date that contains errors in day, month or year. Also write csvs
## for remaing 3 date combinations.
library(tidyverse)
library(lubridate)
library(gghighlight)
library(gridExtra)
library(ggpubr)
library(sf)
library(viridis)
library(gt)
library(glue)
walk(dir("src/functions/", full.names = TRUE),
source)
# Import data -------------------------------------------------------------
## Red-listed endemic species
rl <- low_rm_space(read_csv("data input/vertebrate_threatend_endemic.csv")) %>%
rename(rls = `regional_red-list_status`) %>%
filter(class == "Amphibia")
## Original database from John Measey (records up until 2015)
amph_all <- low_rm_space(read_csv("data input/SA_Amphibians_All_data_full.csv")) %>%
mutate(genus = ifelse(genus == "Amietophrynus","Sclerophrys",genus)) %>% # New genus for Leopard Toads
mutate(scientificname = ifelse(str_detect(scientificname,"Amietophrynus"),
str_replace(scientificname,"Amietophrynus", "Sclerophrys"),
scientificname))
## Check for duplicate rows upfront
print(glue::glue("{nrow(distinct(amph_all))/nrow(amph_all)*100}% UNIQUE RECORDS"))
## Create IUCN species vector
spp <- sort(rl$latin_name)
# Select columns and fiter RL species -------------------------------------
amph <- amph_all %>%
select(objectid, order,family,genus,scientificname,decimallatitude,decimallongitude,coordinateprecision,
basisofrecord,year,month,day,coord_notes,qds,errors,error_notes) %>%
filter(scientificname %in% spp)
# Investigate duplicates --------------------------------------------------
distinct(amph %>% select(-objectid)) #Check for duplicate rows again when objectid column is removed
print(glue::glue("{round(nrow(distinct(amph %>% select(-objectid)))/nrow(amph %>% select(-objectid))*100,2)}% UNIQUE RECORDS"))
## Create a folder for errors
err_dir <- c("data output/occurence record errors")
dir.create(err_dir)
### See example for various ways to filter duplicates ###
test <- tibble(ob = c("id1","id2","id3","id4"),
val1 = c(1,2,2,1),
val2 = c(1,3,4,1)) %>%
group_by_at(vars(-ob))
test
test %>% filter(n() > 1) # Keep non-distinct only
test %>% filter(n() == 1) # Exclude any non-distinct
test %>% filter(row_number() == 1) # Select the first row of duplicated values
###
## Write duplicates to file (non-distinct values)
amph %>%
group_by_at(vars(-objectid)) %>%
filter(n() > 1) %>% # keep non-distinct values only (i.e. duplicates)
arrange(order,family,genus,scientificname,
decimallatitude,decimallongitude,
basisofrecord,coord_notes) %>%
write_csv(glue("{err_dir}/duplicate_records.csv"))
## Remove duplicates from remainder of analysis (select distinct rows)
amph <- amph %>%
group_by_at(vars(-objectid)) %>% #i.e. remove objectID before grouping (equivalent group_by_at(vars(order:error_notes))
filter(row_number() == 1) %>%
ungroup()
# DF with full date (no errors) -------------------------------------------
amph_dmy <- amph %>%
filter(!(year == 0 | is.na(year) | year < 1800 |
month == 0 | is.na(month) | month > 12 |
day == 0 | is.na(day) | day > 31)) %>%
filter(!(month == 2 & day > 28)) %>% # Remove dates like 30th of Feb and 31st of Nov
filter(!(month == 11 & day > 30)) %>%
mutate(date = dmy(str_c(day,month,year,sep = "-")))
# DF with valid month and year (assign day = 1) ---------------------------
amph_my <- amph %>%
filter(!(year == 0 | is.na(year) | year < 1800 |
month == 0 | is.na(month) | month > 12)) %>%
mutate(day = 1) %>%
mutate(date = dmy(str_c(day,month,year,sep = "-")))
# DF with valid year (assign month = 1 and day = 1) -----------------------
amph_y <- amph %>%
filter(!(year == 0 | is.na(year) | year < 1800)) %>%
mutate(day = 1, month = 1) %>%
mutate(date = dmy(str_c(day,month,year,sep = "-")))
# DF with no date information ---------------------------------------------
amph_nd <- amph %>%
filter(year == 0 | is.na(year) | year < 1800 |
month == 0 | is.na(month) | month > 12 |
day == 0 | is.na(day) | day > 31 |
month == 2 & day >28 |
month == 11 & day > 30)
## Check
nrow(amph) == nrow(amph_dmy) + nrow(amph_nd)
nrow(amph) == nrow(amph_dmy) + nrow(amph_my) + nrow(amph_y)
# Create table summarising date time relevant records ---------------------
amph_date_table <- amph_nd %>%
group_by(scientificname) %>%
tally %>%
arrange %>%
left_join(rl %>% select(latin_name,rls), by = c("scientificname" = "latin_name")) %>%
rename(no_date = n) %>%
full_join(amph_dmy %>%
filter(scientificname %in% unique(amph_nd$scientificname)) %>%
group_by(scientificname) %>%
tally %>%
arrange %>%
rename(with_date = n),
by = "scientificname") %>%
mutate(total_records = no_date + with_date,
prop_no_date = round((no_date/(total_records))*100,2)) %>%
print(n = 29)
# Merge DFs ---------------------------------------------------------------
amph <- full_join(amph, amph_dmy)
# Create validity variables for each date component -----------------------
amph <- amph %>%
mutate(year_check = ifelse(year == 0 | is.na(year) | year < 1800, "invalid","valid")) %>%
mutate(month_check = ifelse(month == 0 | is.na(month) | month > 12, "invalid","valid")) %>%
mutate(day_check = ifelse(day == 0 | is.na(day) | day > 31 , "invalid","valid"))
# Write csvs --------------------------------------------------------------
## Create output directory
out_dir <- c("data output/occurence record dates")
dir.create(out_dir)
write_csv(amph_date_table, glue("{out_dir}/occ_records_date_summary.csv"))
write_csv(amph_my, glue("{out_dir}/amph_occ_records_month_year.csv"))
write_csv(amph_y, glue("{out_dir}/amph_occ_records_year.csv"))
write_csv(amph_nd, glue("{out_dir}/amph_occ_records_no_date.csv"))
# Amph date table in GT package -------------------------------------------
amph_gt <- amph_date_table %>%
gt() %>%
tab_header(
title = md("**Breakdown of occurence records and date errors**"),
subtitle = md(glue("*Includes data for {length(spp)} Amphibian species*"))) %>%
tab_source_note(source_note = md("*Analysis based on database provided by John Measey*")) %>%
cols_move_to_start(columns = vars(scientificname,rls,total_records,with_date,no_date, prop_no_date)) %>%
cols_label(scientificname = md("**Species**"),
rls = md("**Red-list status**"),
total_records = md("**Total records**"),
with_date = md("**Valid date**"),
prop_no_date = md("**No date information (%)**"),
no_date = md("**No date information**"))
amph_gt <- amph_gt %>% # Need to do this in two steps for some reason
tab_style(style = cells_styles(text_style = "italic"),
locations = cells_data(columns = vars(scientificname))) %>%
tab_style(style = cells_styles(text_align = "center"),
locations = cells_data(columns = vars(total_records,with_date,no_date,prop_no_date)))
# Can't find a way to automatically write this html to file (can do it via the Viewer tab)
# Write workspaces --------------------------------------------------------
save(list = c("amph","amph_all","rl","spp","err_dir"), file = "data output/amph_data_clean.RData")
|
27baba6493aa1cdd38a073c1fcb5da2c28c25f78
|
6a7f09929503ff62efad8bf049119d5fabd5b6d6
|
/man/nonet_plot.Rd
|
d8514a12b8b2ef3bff207fc320f9136a210a1e91
|
[] |
no_license
|
GSLabDev/nonet
|
514aa1dd1fdae7eb123b88a7a8524ac3e41b753f
|
66b416211abbde4455e029e22aea1e879cec358b
|
refs/heads/master
| 2020-04-12T10:05:19.679235
| 2019-01-03T11:26:02
| 2019-01-03T11:26:02
| 162,418,570
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,847
|
rd
|
nonet_plot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nonet_plot.R
\name{nonet_plot}
\alias{nonet_plot}
\title{Plot the predictions or results of nonet_ensemble}
\usage{
nonet_plot(x, y, dataframe, plot_type = NULL, nonet_size = 20,
nonet_alpha = 0.3, nonet_bins = 25)
}
\arguments{
\item{x}{x axis variable name or histogram entity name}
\item{y}{y axis variable name}
\item{dataframe}{dataframe which is used for plotting purpose.}
\item{plot_type}{type of plot, if not provided it takes "NULL"}
\item{nonet_size}{size of plot need to feed in ggplot}
\item{nonet_alpha}{value of alpha for ggplot}
\item{nonet_bins}{number of bins for histogram}
}
\value{
plotted for the plot results provided as input.
}
\description{
Plot the predictions or results of nonet_ensemble
}
\examples{
# nonet_plot functionality can be explained via below example
# Setup
library(caret)
library(nonet)
library(ggplot2)
# Load Data
dataframe <- data.frame(banknote_authentication[600:900, ])
dataframe$class <- as.factor(ifelse(dataframe$class >= 1, 'Yes', 'No'))
# Spliting into train and test
index <- createDataPartition(dataframe$class, p=0.75, list=FALSE)
trainSet <- dataframe[ index,]
testSet <- dataframe[-index,]
# Feature selection
control <- rfeControl(functions = rfFuncs,
method = "repeatedcv",
repeats = 2,
verbose = FALSE)
outcomeName <- 'class'
predictors <- c("curtosis", "entropy")
# Model Training & predictions
banknote_rf <- train(trainSet[,predictors],trainSet[,outcomeName],method='rf')
predictions_rf_raw <- predict.train(object=banknote_rf,testSet[,predictors],type="raw")
# Results
nonet_eval_rf <- confusionMatrix(predictions_rf_raw,testSet[,outcomeName])
eval_rf_df <- data.frame(nonet_eval_rf$table)
nonet_plot(eval_rf_df$Prediction, eval_rf_df$Reference, eval_rf_df, plot_type = "point")
}
|
f87a3dbef3a02222ebabe28f369d570c82e93f5a
|
7590a2ceba0efdc130c5d7631617e4d829016d5c
|
/man/ggHorizBar.Rd
|
109b2957b416c34337f38e1f8f1f1d5076d2e289
|
[] |
no_license
|
andymckenzie/bayesbio
|
e52b8bfb46d32d04373a3161f6a9722b47af8e32
|
1389283ba9ac8e1778dd7930af35e719a3baf540
|
refs/heads/master
| 2021-01-17T12:44:10.735034
| 2019-06-11T16:24:17
| 2019-06-11T16:24:17
| 59,575,223
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,013
|
rd
|
ggHorizBar.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggHorizBar.R
\name{ggHorizBar}
\alias{ggHorizBar}
\title{Create a color-labeled horizontal bar plot in ggplot2.}
\usage{
ggHorizBar(data_df, dataCol, namesCol, labelsCol, decreasing = TRUE)
}
\arguments{
\item{data_df}{Data frame with columns to specify the data values, the row names, and the fill colors of each of the bars.}
\item{dataCol}{The column name that specifies the values to be plotted.}
\item{namesCol}{The column name that specifies the corresponding names for each of the bar plots to be plotted.}
\item{labelsCol}{The column name that specifies the groups of the labels.}
\item{decreasing}{Logical specifying whether the values in dataCol should be in decreasing order.}
}
\value{
A ggplot2 object, which can be plotted via the plot() function or saved via the ggsave() function.
}
\description{
This function takes a data frame and creates a horizontal (by default) bar plot from it while ordering the values.
}
|
11747e59e510c99277d5c96b5b7d255befbb61bb
|
950030f19c1368f889700299bc36ecf7104f56b8
|
/tests/testthat/test_is_authorized.R
|
1e4e0d602cb63eab224f304aa237a47e57abd3cf
|
[
"MIT"
] |
permissive
|
ropensci/EDIutils
|
0cadce6b8139417fcfa65194e7caf8c77ea087af
|
b1f59cccee3791a04d7702bcb37f76995ae2fcbe
|
refs/heads/main
| 2023-05-22T09:49:03.633710
| 2022-09-09T16:12:30
| 2022-09-09T16:12:30
| 159,572,464
| 2
| 1
|
NOASSERTION
| 2022-11-21T16:22:08
| 2018-11-28T22:13:59
|
R
|
UTF-8
|
R
| false
| false
| 267
|
r
|
test_is_authorized.R
|
context("Is authorized")
testthat::test_that("is_authorized() works", {
url <- "https://pasta.lternet.edu/package/report/eml/knb-lter-sbc/6006/3"
vcr::use_cassette("is_authorized", {
res <- is_authorized(url)
})
expect_true(class(res) %in% "logical")
})
|
7755d7ef1f83a6b3ad975b6a6ae8c19da2459fe7
|
295ab607a406c3c4d0c24e3b162ddfa066086cbd
|
/R/aniplotevents.R
|
e7cef72ae285ed08e20fce0feaa1775c25408ead
|
[] |
no_license
|
ykang/TED
|
34b5e528d43af774df9b3812b851b21aeff3425e
|
6384c692d131817ec60243fa256feb85c6d86de4
|
refs/heads/master
| 2016-09-16T13:13:15.144382
| 2014-05-10T10:23:26
| 2014-05-10T10:23:26
| 19,638,723
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,187
|
r
|
aniplotevents.R
|
#' Generate a gif to visualise the event detection process
#'
#' This function generates a gif file demonstrating how the event detection process is implemented.
#'
#' @param x a vector or a time series.
#' @param w a scalar specifying the size of the sliding window.
#' @param noiseType background noise type assumed for x. There are two options: white noise or red noise.
#' @param alpha the significance level. When the noise test p value of the subsequence is smaller than this significance level,
#' it is defined as a potential event.
#' @param main title of the animiation plot; default is `Animation plot of event detection'.
#' @param xlab x label of the animation plot; default is `t'.
#' @param ylab y label of the animation plot; default is `x'.
#' @param movie.name name of the output gif file; default is `animation.gif'.
#' @param interval a positive number to set the time interval of the animation (unit in seconds); default is 0.05.
#' @param ani.width width of the gif file (unit in px), default is 1000.
#' @param ani.height height of the gif file (unit in px); default is 400.
#' @param outdir character: specify the output directory when exporting the animations; default to be the
#' current working directory.
#' @return ...
#' @seealso \code{\link{noiseTests}}, \code{\link{eventExtraction}}, \code{\link{plotevents}}
#' @references Yihui Xie (2013). Animation: An R Package for Creating Animations and Demonstrating Statistical Methods.
#' \emph{Journal of Statistical Software}, \bold{53}(1), 1-27. \url{http://www.jstatsoft.org/v53/ i01/}.
#' @export
#' @examples
#' set.seed(123)
#' # generate an artificial time series
#' x=c(rnorm(128),cbfs(type='box'),rnorm(128),cbfs(type='rc'),rnorm(128))
#' # generate a gif file to show the event detection process
#' # aniplotevents(x,w=128,noiseType='white',outdir=getwd())
aniplotevents <- function(x, w, noiseType = c("white", "red"), alpha = 0.05, main = "Animation plot of events", xlab = "t", ylab = "x",
movie.name = "animation.gif", interval = 0.05, ani.width = 1000, ani.height = 400, outdir = getwd()) {
noiseType <- match.arg(noiseType)
tests = c()
eventsFound = c()
pbar <- tkProgressBar("test progress bar", "Some information in %", 0, 100, 50)
saveGIF(for (i in 1:(length(x) - w)) {
info <- sprintf("%d%% done", round(i/(length(x) - w) * 100))
setTkProgressBar(pbar, round(i/(length(x) - w) * 100), sprintf("test (%s)", info), info)
xsub = x[(i + 1):(w + i)]
testx = noiseTests(xsub, w, noiseType)
tests = c(tests, testx)
if (testx <= alpha) {
if (is.null(eventsFound$start)) {
plot(c(1:(w + i)), x[1:(w + i)], ty = "l", xlab = xlab, ylab = ylab, col = "#9FC8DC", main = main)
} else {
plot(c(1:(w + i)), x[1:(w + i)], ty = "l", xlab = xlab, ylab = ylab, col = "#9FC8DC", main = main)
for (j in 1:length(a)) {
lines(c(a[j]:b[j]), x[a[j]:b[j]], xlab = xlab, ylab = ylab, col = "#ff53a9")
xline(a[j], lty = 2, col = "#ff53a9")
xline(b[j], lty = 2, col = "#ff53a9")
}
}
lines(c((i + 1):(w + i)), x[(i + 1):(w + i)], xlab = xlab, ylab = ylab, col = "#ff53a9")
} else if (testx > alpha) {
eventsFound = eventExtraction(tests, w, alpha)
if (is.null(eventsFound$start)) {
plot(c(1:(w + i)), x[1:(w + i)], ty = "l", xlab = xlab, ylab = ylab, col = "#9FC8DC", main = main)
} else {
a = ceiling((eventsFound$start + eventsFound$end)/2)
b = a + w - 1
plot(c(1:(w + i)), x[1:(w + i)], ty = "l", xlab = xlab, ylab = ylab, col = "#9FC8DC", main = main)
for (j in 1:length(a)) {
lines(c(a[j]:b[j]), x[a[j]:b[j]], xlab = xlab, ylab = ylab, col = "#ff53a9")
xline(a[j], lty = 2, col = "#ff53a9")
xline(b[j], lty = 2, col = "#ff53a9")
}
}
}
}, movie.name = movie.name, interval = interval, ani.width = ani.width, ani.height = ani.height, out.dir = outdir)
}
|
64b4eaf1eb10c4886ecc238a77be1bdc5b93a910
|
dcdf83c33f192db3d6783cf8d9f2bf947e2cd1c9
|
/man/optimizeUnits.Rd
|
1b5d361e1d7010b25b291d6522ab861341d4a5b0
|
[] |
no_license
|
edvinf/aaSimulator
|
a6461917287b05f1d48170462540a81b978252d8
|
6a5cb890388dfd0a6e9e54c5d91aebedba7da4d9
|
refs/heads/master
| 2020-12-04T03:03:33.587876
| 2020-05-27T22:58:11
| 2020-05-27T22:58:11
| 231,583,192
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,813
|
rd
|
optimizeUnits.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulator.R
\name{optimizeUnits}
\alias{optimizeUnits}
\title{Optimize units}
\usage{
optimizeUnits(
cost,
iterations = c(5, 30, 100, 2000),
replications = c(25, 10, 3, 1),
rank = c("overlap", "overlap", 10, "all"),
attacker = NULL,
defender = NULL,
units = c(),
unittable = aaSimulator::lhtr2_units,
verbose = T
)
}
\arguments{
\item{cost}{The cost to optimize for, unit configurations must not exceed this cost}
\item{iterations}{the number of iterations to run for each unit configuration.}
\item{replications}{the number of replicates to run for each unit configuration}
\item{rank}{number of keyword for determining which results to return. See details.}
\item{attacker}{ool for attacker, NULL if attacking units are to be optimized}
\item{defender}{ool for defender, NULL if defending units are to be optimized}
\item{units}{the units in order of loss that should be sampled for optimization, formatted as \code{\link[aaSimulator]{ool}}. See details.}
\item{unittable}{table of unit properties, formatted as \code{\link[aaSimulator]{unitTable}}}
\item{verbose}{logical() whether to write progress information to stdout}
}
\value{
list with entries formatted as formatted as \code{\link[aaSimulator]{simulationStats}}, containing stats for the optimal unit configurations. See details.
}
\description{
Approximately optimize the unit configuration for a fixed cost.
}
\details{
Runs simulation of for each possible unit configuration, respecting the order of parameter 'units',
and identifies optimal configurations.
The optimal configurations returned are controlled by the parameter 'rank'. If 'rank' is an integer n,
n first results will be returned, ordered by average win percentage.
'rank' may also be a keyword which will have the following effects:
\describe{
\item{'overlap'}{The highest-ranking result will be returned, together with all results where some replicate performs at least as well as the optimum average.}
\item{'all'}{All results will be returned.}
}
'units' denotes the order of unit groups, so that units=c("inf", "arm") will try all combinations
of "inf" and "arm" allowed by the parameter 'cost', but always with all "inf" preceding all "arm".
If 'iterations', 'replications' and 'rank' are vectors of equal length, they specify an iterative optimization,
where the optimal unit configurations from the first round (optimization with iterations[1], replications[1], and rank[1])
are used as the only available unit configurations for subsequent runs.
}
\examples{
results <- optimizeUnits(12, defender = "2 inf", units=c("inf", "art", "arm"),
iterations=100, replications=3, rank="overlap",
verbose=TRUE)
}
|
9dc8b7a6c6b76e7e8befd62ed98fc644b38eca7a
|
5e605fdb3bd68987f776b0121f950a7aee1ccbb9
|
/R/log.likelihood.R
|
3ec10380f5ca8487f2a948df8f46d0f66b04e9e4
|
[] |
no_license
|
diystat/NBPSeq
|
f150da798677a3c27dc27cee916f960f66af149d
|
358f095f4846476c7c9ffe720b502899ea18affb
|
refs/heads/master
| 2021-01-01T16:19:01.230766
| 2014-05-18T00:19:07
| 2014-05-18T00:19:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 617
|
r
|
log.likelihood.R
|
##' The log likelihood of the NB model under the mean-shape parameterization
##'
##' This function calls dnbinom to compute the log likelihood of each data point
##' and sums the results over all data points.
##' kappa, mu and y should have compatible (recyclable) dimensions.
##'
##' @title (private) The Log Likelihood of a NB Model
##'
##' @param kappa shape/size parameter of the negative binomial distribution
##' @param mu mean parameter
##' @param y a n-vector of NB counts
##' @return the log likelihood of the NB model parameterized by \code{(kappa, mu)}
l.nb <- function(kappa, mu, y) {
  ## size is dnbinom's name for the shape parameter kappa
  sum(dnbinom(y, size = kappa, mu = mu, log = TRUE))
}

## Alias kept for backward compatibility with callers using the old name.
ll.nb <- l.nb
|
699b741b8f1904c2e8d06f2ede48ae26fddb3820
|
cc61c862afca41b011a8af626812543010b1453d
|
/man/view.Rd
|
58f6a23f4ff98edd3818dde52a53900c50bbdbdb
|
[
"MIT"
] |
permissive
|
JohnCoene/aframer
|
2dcfc2d4349a24258e1ceee07d122a0a46cdc8f3
|
290e43ba2f1ec21c9458ed5e6e32353e10b491b3
|
refs/heads/master
| 2020-03-26T16:28:28.346296
| 2020-03-08T21:17:28
| 2020-03-08T21:17:28
| 145,104,198
| 11
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 685
|
rd
|
view.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{browse_aframe}
\alias{browse_aframe}
\alias{serve_aframe}
\alias{embed_aframe}
\title{Browse & Embed}
\usage{
browse_aframe(a)
serve_aframe(a)
embed_aframe(a, width = "100\%", height = "400px")
}
\arguments{
\item{a}{An aframe.}
\item{width, height}{Dimensions of \code{DOM} containing aframe, must be valid \code{CSS}.}
}
\description{
Browse or embed an aframe.
}
\note{
Keep the \code{width} at \code{100\%} for a responsive visualisation.
}
\examples{
browse_aframe(
a_scene(
a_dependency(),
a_box(
color = "blue",
position = xyz_aframe(0, 1, -5)
)
)
)
}
|
1213d3dced5a04c61c142b82a1ed3b3b6c298234
|
126cc53af71b48594bb904c658a021da95ea6441
|
/man/survivor_age_mean.Rd
|
4a6bb1e2ec4482dfcc9a56556f41496e1a0280ca
|
[] |
no_license
|
unimi-dse/a2b3bab6
|
5e9cc0739db4f5526412e0483661fc877f7ae790
|
d10891cdd6703d62bf08ee556a91e6fa8cd24027
|
refs/heads/master
| 2020-12-19T23:55:24.921670
| 2020-02-16T21:27:17
| 2020-02-16T21:27:17
| 235,889,837
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 889
|
rd
|
survivor_age_mean.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/survivor_age_mean.R
\name{survivor_age_mean}
\alias{survivor_age_mean}
\title{Survivor Age Mean}
\usage{
survivor_age_mean()
}
\arguments{
\item{df:}{The titanic train data set which we imported and converted to df dataframe.}
\item{sboa}{Variable to store the output of the analysis.}
\item{plot_age}{: subset to store a dataframe containing age and survival rate.}
\item{graph_age}{: variable containing the graph representing the relationship between age and survival rate}
}
\value{
The mean age of the survivors, the most probable age of survival, and a graphical representation of the relationship between age and survival.
}
\description{
This function analyses and returns the mean age among the people who survived. It also gives the age group which is most likely to survive.
}
\examples{
survivor_age_mean()
}
|
2ac0b9bdfee2347f3a838e75bb6df6b4f3d1afbf
|
e64f29c44c383284e244d6f42f0fa9b3ceea73ba
|
/IDS_freq.R
|
30f986692dce457edc42b9d48fc5e7001ced92b0
|
[] |
no_license
|
peterpnorwood/WrittenPrelim
|
a6ba7b2e354d682c8fbb80c2a5bccce650a6d70f
|
876bc82d55a19b7beaf5953a0cb35c4f2e07557d
|
refs/heads/main
| 2023-02-04T14:39:36.265394
| 2020-12-19T18:14:25
| 2020-12-19T18:14:25
| 307,384,756
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,835
|
r
|
IDS_freq.R
|
## ----------------------------------------------------------------- ##
## IDS_freq.R ------------------------------------------------------ ##
## Author: Peter Norwood, NC State University ---------------------- ##
## Purpose: run an experiment with a frequentist analog of IDS ----- ##
## ----------------------------------------------------------------- ##
## load functions
setwd("~/Research/Written Prelim/WrittenPrelim")
source("funcs.R")
## IDS_freq
## Purpose: run an experiment with IDS (frequentist analog)
## param train_set: dataset with context for N individuals
## param burn_in: sample size of simple randomization
## param A: vector of possible treatments
## param theta: true mean outcome parameter vector
## param sd_Y: standard deviation for response
## return dat: dataframe with X,A,mu,Y,regret,norm
## Frequentist analog of Information-Directed Sampling (IDS): after a burn-in
## of pre-randomized subjects, each new subject's arm minimizes the
## "information ratio" ir = cost / information gain, where cost is estimated
## regret (shifted to be positive) and information gain is a determinant
## ratio of the design matrix X'X (D-optimality flavored).
## NOTE(review): relies on mean_outcome() from funcs.R (sourced above).
IDS_freq <- function(train_set,burn_in,A,theta,sd_Y){
  ## number of subjects
  N <- nrow(train_set)
  ## dimension of context; train_set carries 3 extra columns (A, mu, Y),
  ## so its first p columns are context -- TODO confirm against gen_data()
  p <- ncol(train_set)-3
  ## number of arms
  K <- length(A)
  ## trial dataset: p context columns + A, mu, Y, regret, norm (p+5 total)
  dat <- matrix(NA,nrow=N,ncol=p+5)
  ## copy the context for all N subjects
  dat[1:N,1:p] <- as.matrix(train_set)[1:N,1:p]
  ## first burn_in interventions come from the simply-randomized train_set
  dat[1:burn_in,p+1] <- train_set$A[1:burn_in]
  ## first burn_in true mean outcomes
  dat[1:burn_in,p+2] <- train_set$mu[1:burn_in]
  ## first burn_in observed outcomes
  dat[1:burn_in,p+3] <- train_set$Y[1:burn_in]
  ## reuse train_set's column names and append the two tracking columns
  colnames(dat) <- c(colnames(train_set),"regret","norm")
  ## adaptively assign each post-burn-in subject
  for(i in (burn_in+1):N){
    ## refit the outcome model on everything observed so far:
    ## per-arm intercepts plus per-arm context slopes, no global intercept
    X_temp <- dat[1:(i-1),1:p]
    A_temp <- dat[1:(i-1),p+1]
    Y <- dat[1:(i-1),p+3]
    temp <- data.frame(X_temp,A=A_temp,Y)
    fit <- lm(Y~-1+as.factor(A)+as.factor(A):.-
                as.factor(A):A,
              data=temp)
    ## gather parameter convergence information
    coef_fit <- coef(fit)
    theta_hat <- c()
    ## reorder lm's term-grouped coefficients into the same arm-major
    ## layout as the true theta vector
    tik <- 1
    for(ii in 1:K){
      for(jj in 0:p){
        theta_hat[tik] <- coef_fit[ii+(K)*jj]
        tik=tik+1
      }
    }
    ## Euclidean (Frobenius) norm between theta and theta_hat
    dat[i,ncol(dat)] <- norm(matrix(theta-theta_hat),type="F")
    ## evaluate each candidate arm: (arm, est. mean, info gain, true mean)
    info <- matrix(NA,nrow=length(A),ncol=4)
    ## determinant of X'X before adding subject i (info-gain baseline)
    prev_X <- model.matrix(fit)
    det_prev_XtX <- det(t(prev_X) %*% prev_X)
    tick=1
    for(a in A){
      ## determinant of X'X if arm a were assigned to subject i
      temp2 <- data.frame(t(dat[i,1:p]),A=a,Y=0)
      temp_X <- model.matrix(fit,data=rbind(temp,temp2))
      det_XtX <- det(t(temp_X) %*% temp_X)
      info_gain <- det_XtX/det_prev_XtX
      ## estimated mean outcome for arm a under the current fit
      mu_hat <- predict(fit,temp2)
      ## true mean outcome given a (oracle, used only for regret bookkeeping)
      mu <- mean_outcome(X=dat[i,1:p],A=A,a=a,theta=theta)
      ## save this arm's row
      info[tick,] <- c(a,mu_hat,info_gain,mu)
      tick=tick+1
    }
    ## collect per-arm summaries as a data frame
    info <- data.frame(info)
    colnames(info) <- c("A","mu_hat","info_gain","mu")
    ## cost = estimated regret, shifted by +1 so it stays strictly positive
    info$cost <- (max(info$mu_hat)+1)-info$mu_hat
    ## information ratio: cost per unit of information gained
    info$ir <- info$cost/info$info_gain
    ## assign the intervention minimizing the information ratio
    dat[i,p+1] <- info$A[which.min(info$ir)]
    ## record its true mean outcome
    dat[i,p+2] <- info$mu[which.min(info$ir)]
    ## draw the observed outcome
    dat[i,p+3] <- rnorm(1,dat[i,p+2],sd_Y)
    ## regret relative to the best arm's true mean
    dat[i,p+4] <- max(info$mu) - dat[i,p+2]
  }
  dat <- data.frame(dat)
  dat$sub <- 1:nrow(dat)
  return(dat)
}
# p <- 5
# K=5
# theta <- rnorm((p+1)*K,0,1)
# train_set <- gen_data(N=500,p=p,sd_X=0.5,A=1:K,sd_Y=1,theta=theta)
# test_IDS <- IDS_freq(train_set=train_set,burn_in=(p+1)*K*3,A=1:K,theta=theta,sd_Y=1)
# #
# # hist(test_greedy$regret)
# # #
# ggplot(data=test_IDS[((p+1)*K*3+1):nrow(test_IDS),]) +
# geom_line(aes(x=sub,y=norm))
|
147865a56e24aa2093f59779f3551a07945f11b3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/astrolibR/vignettes/astrolibR.R
|
2a0665bb65c75be73c491ed7d2009e1daf434922
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 49
|
r
|
astrolibR.R
|
### R code from vignette source 'astrolibR.Rnw'
|
6df4b9492cc2fd6a4788e2d4cea7a26d73d1baab
|
e04dddfe950a587d802bb18b44ee33de0890be12
|
/cross-species/4.DE.R
|
8c7ba4356f38e17cb5704a0f00aa62b731ca4222
|
[] |
no_license
|
xscapintime/crspecies_RNAseq-analysis
|
aacaa55b468573701709020c88c9569312102ee5
|
cff864a924d5619fe770d79c570ea2dfb23ec41d
|
refs/heads/master
| 2023-07-18T20:27:57.241427
| 2021-09-18T15:12:09
| 2021-09-18T15:12:09
| 379,606,798
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,138
|
r
|
4.DE.R
|
## Cross-species differential expression: human (arrested vs 8-cell) and
## mouse (diapause vs E4.5) RNA-seq, restricted to human-mouse ortholog
## pairs so results are comparable between species.
## NOTE(review): rm(list = ls()) at script top is discouraged; kept as-is.
rm(list = ls())
#options(scipen = 999)
library(tidyverse)
library(DESeq2)
## read counts (genes below cutoff already removed upstream)
# human
human_exp_mat <- read.table("human_select_rdcounts.tsv")
# mouse
mouse_exp_mat <- read.table("mouse_select_rdcounts.tsv")
## keep only the homology/orthology PAIRS present in both species
human_symbol <- read.table("human_idsyb.tsv")
mouse_symbol_tohuman <- read.table("mouse_idsyb_mapped2hugo.tsv")
index <- inner_join(human_symbol, mouse_symbol_tohuman, by = c("hgnc_symbol" = "hsapiens_homolog_associated_gene_name"))
write.table(index, file = "pairsidx.tsv", quote = F, sep = "\t")
# human count matrix: attach HGNC symbols, average counts for genes mapping
# to the same symbol; column 29 drops the Ensembl-id join column
human_exp_mat$id <- row.names(human_exp_mat)
human_exp_mat <- inner_join(human_exp_mat, index[, 1:2], by = c("id" = "ensembl_gene_id.x"))
human_exp_mat <- human_exp_mat[, -29] %>% group_by(hgnc_symbol) %>% summarise_all(mean)
#human_exp_mat <- data.frame(human_exp_mat)
human_exp_mat$id <- index$ensembl_gene_id.x[match(human_exp_mat$hgnc_symbol, index$hgnc_symbol)]
# mouse count matrix: same treatment (column 6 is the join column here)
mouse_exp_mat$id <- row.names(mouse_exp_mat)
mouse_exp_mat <- inner_join(mouse_exp_mat, index[, 2:3], by = c("id" = "ensembl_gene_id.y"))
mouse_exp_mat <- mouse_exp_mat[, -6] %>% group_by(hgnc_symbol) %>% summarise_all(mean)
#mouse_exp_mat <- data.frame(mouse_exp_mat)
mouse_exp_mat$id <- index$ensembl_gene_id.y[match(mouse_exp_mat$hgnc_symbol, index$hgnc_symbol)]
## differential expression with DESeq2
# design matrices: human = 8 "arr" + 20 "8C" samples; mouse = 2 "dia" +
# 3 "E4.5" samples; sample order is assumed to match the count-matrix
# column order -- TODO confirm against the input TSV headers
human_meta <- data.frame(stage = factor(c(rep("arr", each = 8),
                                          rep("8C", each = 20))))
row.names(human_meta) <- colnames(human_exp_mat)[2:29]
mouse_meta <- data.frame(stage = factor(c(rep("dia", each = 2),
                                          rep("E4.5", each = 3))))
row.names(mouse_meta) <- colnames(mouse_exp_mat)[2:6]
# build DESeqDataSet objects (counts coerced to integers as DESeq2 requires)
human_dat <- sapply(human_exp_mat[, 2:29], as.integer)
row.names(human_dat) <- human_exp_mat$hgnc_symbol
mouse_dat <- sapply(mouse_exp_mat[, 2:6], as.integer)
row.names(mouse_dat) <- mouse_exp_mat$hgnc_symbol
human_dds <- DESeqDataSetFromMatrix(countData = human_dat, colData = human_meta, design = ~ stage)
mouse_dds <- DESeqDataSetFromMatrix(countData = mouse_dat, colData = mouse_meta, design = ~ stage)
# drop genes with zero normalized counts across all samples
human_dds <- estimateSizeFactors(human_dds)
mouse_dds <- estimateSizeFactors(mouse_dds)
filter_h <- rowSums(counts(human_dds, normalized = TRUE)) > 0
table(filter_h)
# filter_h
#  TRUE
# 15523
human_dds <- human_dds[filter_h, ]
filter_m <- rowSums(counts(mouse_dds, normalized = TRUE)) > 0
table(filter_m)
# filter_m
#  TRUE
# 10783
mouse_dds <- mouse_dds[filter_m, ]
# run DESeq and export tidy results (contrast: condition vs reference)
human_res <- results(DESeq(human_dds), contrast = c("stage", "arr", "8C"),
                     tidy = TRUE, independentFiltering = F)
write.table(human_res, file = "human_deg.tsv", quote = F, sep = "\t")
mouse_res <- results(DESeq(mouse_dds), contrast = c("stage", "dia", "E4.5"),
                     tidy = TRUE, independentFiltering = F)
write.table(mouse_res, file = "mouse_deg.tsv", quote = F, sep = "\t")
|
8eb162b8201870ed45792e29d897823a51792fe8
|
becf66d452c52a4ebafbfb061c11f3c95347ddcf
|
/stats/non-merge.R
|
48e3fb967b9a7397a2f199170525a6176733af99
|
[] |
no_license
|
caiusb/MergeConflictAnalysis
|
55b40734c71e9618eae14e921782b69a79ce841e
|
e41863670505bff853c85572ccf6414b23a0a0af
|
refs/heads/master
| 2021-09-05T17:52:18.254826
| 2018-01-30T02:14:44
| 2018-01-30T02:14:44
| 29,747,203
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 507
|
r
|
non-merge.R
|
source("common.R")
resultsFolder <<- "../../results/merge-data"
## Load non-merge commit records from every CSV file found under `folder`.
## listCSVFiles() and readCSVFiles() are provided by common.R (sourced above).
loadNonMerge <- function(folder) {
  csv.paths <- listCSVFiles(folder)
  ## seed frame fixes the expected columns even when no files are found
  empty.frame <- data.frame(SHA = character(0),
                            TIME = integer(0),
                            AUTHOR = character(0))
  readCSVFiles(csv.paths, empty.frame)
}
## Load all regular (non-merge) commits and derive date/weekday/time columns;
## concat/unix2POSIXct/calculateWeekdays/calculateTimes come from common.R.
## NOTE(review): loadNonMerge seeds a TIME column but COMMIT_TIME is read
## here -- presumably readCSVFiles supplies COMMIT_TIME; verify in common.R.
nonMerge <- loadNonMerge(concat(resultsFolder, "/regular"))
nonMerge$Date <- unix2POSIXct(nonMerge$COMMIT_TIME)
nonMerge <- calculateWeekdays(nonMerge)
nonMerge <- calculateTimes(nonMerge)
|
e29fc9098f9201f35a65467b0fe889fe2d16ede8
|
f5feacda6bcf986bf61cdfa57f5387ed7e651918
|
/man/compute_fit_stats.Rd
|
f29c0b70df9e9129ba43b12d5476844d3dbd3960
|
[] |
no_license
|
cran/functClust
|
3386c3179bdf9e255bfec00ed8f39b6c3c696da1
|
f7415612fbc0fd749a1da01e822b6217e2b8bb0e
|
refs/heads/master
| 2023-01-20T01:30:18.270906
| 2020-12-02T09:30:02
| 2020-12-02T09:30:02
| 318,755,202
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,060
|
rd
|
compute_fit_stats.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/validating.R
\name{compute_fit_stats}
\alias{compute_fit_stats}
\title{Statistics of model goodness-of-fit}
\usage{
compute_fit_stats(mCal, mPrd, fobs, xpr, nbK)
}
\arguments{
\item{mCal}{a numeric matrix.
This matrix is the matrix of performances predicted by the tree model.}
\item{mPrd}{a numeric matrix.
This matrix is the matrix of performances predicted by cross-validation.}
\item{fobs}{a numeric vector. The vector \code{fobs} contains the
quantitative performances of assemblages.}
\item{xpr}{a vector of numerics of \code{length(fobs)}.
The vector \code{xpr} contains the weight of each experiment,
and the labels (in \code{names(xpr)}) of different experiments.
The weigth of each experiment is used
in the computation of the Residual Sum of Squares
in the function \code{rss_clustering}.
The used formula is \code{rss}
if each experiment has the same weight.
The used formula is \code{wrss}
(barycenter of RSS for each experiment)
if each experiment has different weights.
All assemblages that belong to a given experiment
should then have the same weight.
Each experiment is identified by its names (\code{names(xpr)})
and the RSS of each experiment is weighted by values of \code{xpr}.
The vector \code{xpr} is generated
by the function \code{stats::setNames}.}
\item{nbK}{an integer.
This integer corresponds to the number of observed assembly motifs.}
}
\value{
Return statistics of model goodness-of-fit.
}
\description{
Takes a matrix of calibrations, a matrix of predictions,
the vector of observed performances,
the number of observed assembly motifs,
and return a matrix of statistics for model goodness-of-fit.
}
\details{
Be careful: the matrix argument order is not arbitrary.
The first argument \code{mCal} is matrix of modelled values.
The second argument \code{mPrd} is matrix of values
predicted by cross-validation.
The third argument \code{fobs} is the vector of observed values.
}
\keyword{internal}
|
ef0d8369832ade356b92e45db63c746893a41bd3
|
f1721111e077d9e5d14b4fe8f40f6baa33308fcb
|
/man/motion_readfd.Rd
|
82887a9edb7c7e4aa3a8a68a98716f8280c274b3
|
[] |
no_license
|
LabNeuroCogDevel/LNCDR
|
6d71d98c36a42ebef3479b9acc183680d0d0bb8d
|
f9944ce2ca03c38476975b59b0edb458d65ee227
|
refs/heads/master
| 2023-04-27T05:01:26.259112
| 2023-04-18T19:12:33
| 2023-04-18T19:12:33
| 41,372,116
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 268
|
rd
|
motion_readfd.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/motion.R
\name{motion_readfd}
\alias{motion_readfd}
\title{extract FD from fd file}
\usage{
motion_readfd(f)
}
\arguments{
\item{f}{- 'fd.1D' file}
}
\description{
extract FD from fd file
}
|
3650fcf99097d2f3c6bddee711b67b0aa2879789
|
2624780e9ac235d2b08aa69b191033fe35cdc915
|
/tests/testthat/test_summary.R
|
c0db0e4c81531d1a87f0f8723c7186def1381d6f
|
[] |
no_license
|
Sandy4321/janus
|
81c01fdc783252cff1d16cdd506bcaf1d0f22ca8
|
8dc36385a063de0e1efc0ed76bb00dccccd78012
|
refs/heads/master
| 2021-01-14T14:07:50.902289
| 2015-09-13T23:06:58
| 2015-09-13T23:06:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 197
|
r
|
test_summary.R
|
library(janus)

context("summarize fitted model")

## summary.janus should refuse objects that are not janus models
test_that("error is thrown if non-janus object is given as parameter", {
  plain.lm <- lm(mpg ~ ., data = mtcars)
  expect_error(summary.janus(plain.lm))
})
|
ffc63fb9a74a3bcd8664f3f4fd223abb4605b04e
|
e67140b7633e8a45fc2662866e2a2dcd4ed904b8
|
/R/parseFactorLevels.R
|
7d672fedbdb90f1f98a8d61246ded96d9222737a
|
[] |
no_license
|
jakobbossek/farff
|
7a21de6df12ed8a5249e3d2cd68a4ce2b7193297
|
5090b3bcc4a927be80ad29091d508d57090a5718
|
refs/heads/master
| 2021-01-14T13:22:13.241728
| 2015-10-27T14:27:45
| 2015-10-27T14:27:45
| 41,908,306
| 0
| 0
| null | 2015-09-04T09:39:00
| 2015-09-04T09:38:59
|
R
|
UTF-8
|
R
| false
| false
| 1,257
|
r
|
parseFactorLevels.R
|
## Parse an ARFF nominal attribute specification like {a, 'b c', "d"} into a
## character vector of factor levels. Levels may be single-quoted,
## double-quoted, or bare (bare levels get surrounding whitespace trimmed);
## "\%" escapes are unescaped to "%" at the end. Relies on stringi (stri_*)
## and stopf() from the package namespace.
##
## @param x    [character(1)] text starting at the opening "{".
## @param line [character(1)] the full attribute line, used only in the
##   error message when parsing fails.
## @return [character] the parsed factor levels, in order of appearance.
parseFactorLevels = function(x, line = "LINE NOT GIVEN") {
  ## consume: try to match regex r at the start of s. On success returns
  ## list(rest = remainder[, match = first capture group]); on failure
  ## returns NULL, or raises via stopf() when no.match.error = TRUE.
  consume = function(s, r, no.match.error = FALSE) {
    m = stri_match_first_regex(s, r)[1L, ]
    if (is.na(m[1L])) {
      if (no.match.error)
        stopf("Error while parsing factor levels in line:\n%s", line)
      else
        return(NULL)
    } else {
      if (length(m) == 1L)
        return(list(rest = substr(s, nchar(m[1L])+1L, nchar(s))))
      else
        return(list(rest = substr(s, nchar(m[1L])+1L, nchar(s)), match = m[2L]))
    }
  }
  levs = character(0L)
  ## opening brace is mandatory
  z = consume(x, "^\\s*\\{\\s*", no.match.error = TRUE)
  x = z$rest
  while(nchar(x) > 0L) {
    ## try quoting styles in order; note that z$match on a NULL z yields
    ## NULL without error, so the is.null(z) checks below remain valid
    z = consume(x, "^'([^']*)'", no.match.error = FALSE)
    lev = z$match
    if (is.null(z)) {
      z = consume(x, '^"([^"]*)"', no.match.error = FALSE)
      lev = z$match
    }
    if (is.null(z)) {
      ## bare level: everything up to the next comma or closing brace
      z = consume(x, "^([^,}]*)", no.match.error = TRUE)
      # the regexp above could also match some trailing ws before the comma
      lev = stri_trim(z$match)
    }
    levs = c(levs, lev)
    x = z$rest
    ## consume the separator; no comma means this was the last level
    z = consume(x, "\\s*,\\s*")
    if (is.null(z))
      break;
    x = z$rest
  }
  ## closing brace is mandatory
  z = consume(x, "^\\s*\\}\\s*", no.match.error = TRUE)
  ## unescape "\%" back to plain "%"
  levs = stri_replace_all(levs, "%", fixed = "\\%")
  return(levs)
}
|
5fc12c6b0b22c9c4cd97f4c8898f7b4f0bd929b2
|
2b3cbc05953d0502cfd03db9cc8818ceff5783c2
|
/80bb2a25-ac5d-47d0-abfc-b3f3811f0936/R/Temp/axEMDqiV4SCHo.R
|
07c888e4888d12e27244ce6feedd964e2cf38d14
|
[] |
no_license
|
ayanmanna8/test
|
89124aa702fba93a0b6a02dbe6914e9bc41c0d60
|
4f49ec6cc86d2b3d981940a39e07c0aeae064559
|
refs/heads/master
| 2023-03-11T19:23:17.704838
| 2021-02-22T18:46:13
| 2021-02-22T18:46:13
| 341,302,242
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 850
|
r
|
axEMDqiV4SCHo.R
|
with(a29fa0dbeda864e2ab65658eef89b70c2, {ROOT <- 'D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/80bb2a25-ac5d-47d0-abfc-b3f3811f0936';source("D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/R/Recommendations/advanced_federation_blend.r");a2Hrpdwy3col1<- as.character(FRAME878836$location);linka1feaw <- data.table("col1"=c("null"), "col2"=c("null")); linka1feaw <- unique(linka1feaw);aF9VIwDjq<- curate(a2Hrpdwy3col1,linka1feaw);aF9VIwDjq <- as.data.table(aF9VIwDjq);names(aF9VIwDjq)<-"avLJVF33M";FRAME878836 <- cbind(FRAME878836,aF9VIwDjq);FRAME878836 <- FRAME878836[,-c("location")];colnames(FRAME878836)[colnames(FRAME878836)=="avLJVF33M"] <- "location";rm(aF9VIwDjq,linka1feaw,a2Hrpdwy3col1,a2Hrpdwy3, best_match, best_match_nonzero, best_match_zero, blend, curate, self_match );});
|
dacc5e3ebc841b1ee8379c78e070319780a8308e
|
322f61a1b25bff4adbe818d6b823257e246a3640
|
/1_6.R
|
ba1bdd67f501f73e6dca45a9f53551c419eec13d
|
[] |
no_license
|
TShibano/class_data_1
|
1b25bcda427be4467df6832d87fb6e1a473822d5
|
c80eb0f7a1f2c34ba326a0c4d59b91063dbb6ae4
|
refs/heads/master
| 2020-08-04T18:51:14.488627
| 2019-11-26T03:13:12
| 2019-11-26T03:13:12
| 212,243,030
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 175
|
r
|
1_6.R
|
# Exercise answers 1-5: small arithmetic/trig demonstrations.
# Fix: use the idiomatic `<-` assignment operator instead of `=`.

# 1: integer power (2^8 = 256)
a <- 2
b <- 8
print(a^b)
# 2: sine of 67 degrees (`do` is the degrees-to-radians factor)
do <- pi / 180
c <- 67
print(sin(c * do))
# 3: absolute value
d <- -23.456
print(abs(d))
# 4: remainder of 7 / 3
e <- 7
f <- 3
print(e %% f)
# 5: ten uniform random draws on [0, 1]; seed fixed for reproducibility
set.seed(0)
runif(10, min = 0, max = 1)
|
eb53633bae6ff6f342e79cb270f75044fb17630d
|
140fd86a6c3954128e3f3781240af79e9156430c
|
/R/ModelVarPlote.R
|
4d036d3c7ee699020c14c4c4d329da23ca955ae0
|
[] |
no_license
|
gfalbery/ggregplot
|
b69aaf0eda216795c991bc28b88cc9d5943f35ca
|
b3aeb51756f305b00ab17653793b72291935fa4b
|
refs/heads/master
| 2023-03-30T08:16:22.918679
| 2023-03-16T13:17:37
| 2023-03-16T13:17:37
| 163,702,155
| 6
| 5
| null | 2019-09-24T17:39:24
| 2018-12-31T23:08:20
|
R
|
UTF-8
|
R
| false
| false
| 458
|
r
|
ModelVarPlote.R
|
##### ModelVarPlote
## Forest-style plot of a model's variance components: posterior means with
## 95% credible intervals, a dashed zero reference line, and flipped axes.
## NOTE(review): summary(model)$Gcovariances / $Rcovariances and the
## post.mean / l.95..CI / u.95..CI columns look like MCMCglmm output --
## confirm the intended model class. Relies on ggplot2 and a globally
## defined THEME object.
ModelVarPlote<-function(model){
  ## stack random (G) and residual (R) covariance summaries, dropping duplicates
  graph<-unique(data.frame(rbind(summary(model)$Gcovariances,summary(model)$Rcovariances)))
  graph$Factor<-0
  ## row names become an ordered factor so the plot preserves component order
  graph$Factor<-factor(rownames(graph),levels=rownames(graph))
  ggplot(as.data.frame(graph),aes(x=Factor,y=post.mean))+geom_point()+geom_errorbar(aes(ymin=graph[,"l.95..CI"],ymax=graph[,"u.95..CI"]),size=0.3,width=0.4)+
    geom_hline(aes(yintercept=0),lty=2)+THEME+labs(x=NULL)+coord_flip()
}
|
4ba8c21ca3d5c069eb115437908deb5ddb7cb763
|
352f976e18a570948d915aba7d4b3a32bf147186
|
/Practicals/Practical_B/Photosynthesis.Rcheck/00_pkg_src/Photosynthesis/R/get.es.R
|
c3de3a9d4ba3096df1223bf483384bef6aa5f014
|
[] |
no_license
|
femeunier/VegMod_course
|
04109dd66990be845f7a2f856d9a2212a4397ae2
|
436e66c909b260c0bc7d36dd0ad038a0d7c63b12
|
refs/heads/master
| 2023-04-23T16:46:41.346969
| 2021-05-10T10:21:23
| 2021-05-10T10:21:23
| 263,193,597
| 0
| 3
| null | 2021-05-07T10:25:49
| 2020-05-12T00:45:48
|
HTML
|
UTF-8
|
R
| false
| false
| 358
|
r
|
get.es.R
|
#' @description Calculate saturation vapor pressure
#'
#' @title get es
#' @param temp temperature in degrees K
#' @return saturation vapor pressure in mb
#' @export
#' @author David LeBauer
#' @examples
#' temp <- -30:30
#' plot(temp, get.es(temp))
get.es <- function(temp) {
  ## Clausius-Clapeyron form: 6.11 mb at the 273 K reference temperature,
  ## scaled by exp(L/Rv * (1/T0 - 1/T)) with L/Rv = 2500000/461
  6.11 * exp((2500000 / 461) * (1 / 273 - 1 / temp))
} # get.es
|
270a0c548b039a7f5eff803e23fbb6254092df59
|
c9f3369c749e5a3cfebaa96dc1b484e72a3dd7d0
|
/man/nogenV.Rd
|
014739334dbefb8ffa0671a6e61663e19be53949
|
[] |
no_license
|
amoloudi/R-PKG-Distributions
|
128ff992a10a0da915f27aa941d2f9d476ad9e9a
|
20daa9657d6833cb7aff2ac9194340504c1f0270
|
refs/heads/master
| 2021-09-02T19:18:04.772144
| 2018-01-03T19:13:48
| 2018-01-03T19:13:48
| 115,752,321
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,008
|
rd
|
nogenV.Rd
|
\name{nogenV}
\alias{nogenV}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Normal Generator Visualization
}
\description{
Improved version of nogen; can generate n random numbers.
}
\usage{
nogenV(n, u, s)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{n,u,s}{
n as number of random numbers, parameter u as mean and s as standard deviation.
}
}
\details{
-
}
\value{
This function returns a vector containing the random numbers.
}
\references{
www.wikipedia.com
}
\author{
A.Moayedi}
\note{
This function also plots the random numbers in order to visualize this distribution with the given mean µ and standard deviation σ.
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
nogenV.R
}
\examples{
nogenV(1000, 3, 20)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~nogenV }% use one of RShowDoc("KEYWORDS")
\keyword{ ~Nnormal }% __ONLY ONE__ keyword per line
|
4b86bc1771a1e2006c09a7c4d7e08e7aba227e96
|
d628e0d2fe6e3124784c5b9b4d6901311c5f6aac
|
/Journal_figs/single_locus_selection/Culex_resistance/culex_resistance.R
|
385ad5b29175e024b8475c0230bb35e2e60f2af7
|
[
"MIT",
"CC-BY-3.0"
] |
permissive
|
cooplab/popgen-notes
|
ac5f53efc75aa8e40702990116b8a4cfb35d791d
|
8c1f59f3aaed2da5e037be6bf032e5505f27d606
|
refs/heads/master
| 2023-09-03T17:39:48.129549
| 2023-05-02T04:17:19
| 2023-05-02T04:17:19
| 9,527,828
| 558
| 125
|
MIT
| 2023-06-01T04:13:47
| 2013-04-18T17:42:24
|
PostScript
|
UTF-8
|
R
| false
| false
| 1,095
|
r
|
culex_resistance.R
|
## Cline plots of insecticide-resistance allele frequencies (Ace-1 and Ester)
## in Culex mosquitoes versus distance from the coast; the dashed line at
## 19 km separates the insecticide-treated zone from the untreated zone.
culex<-read.csv("~/Dropbox/Courses/Popgen_teaching_Notes/Journal_figs/single_locus_selection/Culex_resistance/culex_resistance.csv")
## two panels side by side
layout(t(1:2))
## left panel: observed Ace-1 allele frequency vs distance
plot(culex[,c("dist_km_Ace1","Ace1_freq")],cex=1.2,pch=19,xlab="Distance from coast (km)",ylab="Allele frequency",ylim=c(.1,1),cex.axis=1.2,cex.lab=1.4,main="Ace 1",cex.main=1.4)
## lowess-smoothed fitted cline (f = smoother span)
my.lowess<-lowess(culex[,c("dist_km_cline_Ace1","fitted_Ace1_freq")],f=1/7)
lines(my.lowess$x,my.lowess$y,lwd=2)
abline(v=19,col="black",lty=2)
text(x=10,y=.15, "Treated",cex=1.2)
text(x=30,y=.15, "Untreated",cex=1.2)
## right panel: observed Ester allele frequency vs distance
## NOTE(review): the stray double comma after ylim=c(.1,1) passes an empty
## argument to plot(); harmless here but presumably unintended.
plot(culex[,c("dist_km_Ester","Ester_freq")],cex=1.2,pch=19,xlab="Distance from coast (km)",ylab="Allele frequency",ylim=c(.1,1),,cex.axis=1.2,cex.lab=1.4,main="Ester",cex.main=1.4)
## row 29 excluded from the smoother -- presumably an outlier; verify
my.lowess<-lowess(culex[-29,c("dist_km_cline_Ester","fitted_Ester_freq")],f=1/7)
lines(my.lowess$x,my.lowess$y,lwd=2)
abline(v=19,col="black",lty=2)
text(x=10,y=.15, "Treated",cex=1.2)
text(x=30,y=.15, "Untreated",cex=1.2)
## write the finished two-panel figure to PDF
dev.copy2pdf(file="~/Dropbox/Courses/Popgen_teaching_Notes/Journal_figs/single_locus_selection/Culex_resistance/culex_resistance.pdf")
d43bd97f0966f5b4e33050af3fd754fb13049a4e
|
b572344dacbc26cd6ccf746b75ec8ceb943cbcd6
|
/plot3.R
|
4abeaee5ad381488b5ca33b72bfef2874094ca92
|
[] |
no_license
|
amete/ExploratoryDataAnalysisProject
|
b22fe49f37e85378ff69cb468993039dee4e8128
|
a4084916097c247fd777d9354580e29212aaf9a4
|
refs/heads/master
| 2021-01-20T02:07:47.837827
| 2017-04-26T15:44:14
| 2017-04-26T15:44:14
| 89,377,643
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,476
|
r
|
plot3.R
|
## Plot 3: total PM2.5 emissions in Baltimore City (fips 24510) by source
## type, 1999-2008, from the EPA National Emissions Inventory dataset.
# Download the data (only when the RDS files are not already present)
emission.data.file.name <- "summarySCC_PM25.rds"
classification.data.file.name <- "Source_Classification_Code.rds"
if(!file.exists(emission.data.file.name) || !file.exists(classification.data.file.name)) {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip",
                "data.zip",
                method = "curl")
  unzip("data.zip")
  file.remove("data.zip")
}
# Load the data into memory
NEI <- readRDS(emission.data.file.name)
SCC <- readRDS(classification.data.file.name)
# Load dplyr, installing it first if missing
if(!require(dplyr)) {
  install.packages("dplyr")
  require(dplyr)
}
# Load ggplot2, installing it first if missing
if(!require(ggplot2)) {
  install.packages("ggplot2")
  require(ggplot2)
}
# Sum emissions per year and source type for Baltimore City only
NEI_Baltimore <- NEI[NEI$fips == "24510",]
result <- summarise(group_by(NEI_Baltimore,year,type),sum(Emissions,na.rm = TRUE))
result <- as.data.frame(result)
names(result)[3] <- "Emissions"
# Plot the yearly totals, one colored line per source type
p <- qplot(year, Emissions, data=result, col = type) + geom_line(lwd = 1) + geom_point(size=2, shape=21, fill="white")
# NOTE(review): "Emmision Type" in the legend title is a typo that shows in
# the rendered output; left unchanged here to preserve behavior.
p <- p + labs(title = expression(paste("Total ","PM"[2.5]," Emission in Baltimore by Emission Type (1999-2008)")),
              x = "Year", y = expression(paste("Total ","PM"[2.5]," Emission (tons)")),
              colour = "Emmision Type") + theme(plot.title = element_text(hjust = 0.5))
# type="quartz" assumes macOS -- TODO confirm portability if run elsewhere
png("plot3.png", width=640, height=480, units = "px", type="quartz")
print(p)
# Save and quit
dev.off()
|
c24a8da5cdc4e439b057282a086ffa6636699aa8
|
302f30032ecf57300d4947f458b58132418d5d57
|
/code/DiM_PG_example-calls.r
|
8ed01752990df6337ef0f8eacf018f487882aa44
|
[] |
no_license
|
m-clark/bretthorst
|
3fe22d5b1e3a2ce13cb68752bbd028e3596ea81e
|
615287b04e03d0505f151f482daa4ec37fbdf790
|
refs/heads/master
| 2022-09-17T13:19:55.855914
| 2020-06-05T15:56:48
| 2020-06-05T15:56:48
| 256,238,635
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,480
|
r
|
DiM_PG_example-calls.r
|
## Example driver for the Bayesian difference-in-means analysis in DiM_PG.r
## (Phil Gregory's two-sample comparison; UMS-style calls follow below).
## NOTE(review): rm(list=ls()) wipes the user's workspace -- acceptable only
## because this is a throwaway example script.
rm(list=ls())
source("DiM_PG.r")
################################################################################
# Phil Gregory
## Raw two-sample input: per-group observations, grid sizes for the
## posterior evaluation, and prior bounds on the mean and the sd.
inputvalues <- list(snames = c("riverB.1","riverB.2"),
                    # sample 1
                    d1 = c(13.2,13.8,8.7,9,8.6,9.9,14.2,9.7,10.7,8.3,8.5,9.2),
                    # sample 2
                    d2 = c(8.9,9.1,8.3,6,7.7,9.9,9.9,8.9),
                    # Input priors and no. of steps in evaluation of p(r|D_1,D_2,I) & p(delta|D_1,D_2,I)
                    # ndelta = number of steps in delta parameter (mean difference)
                    ndelta = 1000, #100
                    # nr = number of steps in r parameter (ratio of the standard deviations)
                    nr = 1000, # 100
                    # Set prior limits (assumed the same for each data set) on mean (low,high),
                    # and prior limits (assumed the same for each data set) on the
                    # standard deviation (sigmalow, sigmahigh).
                    # upper mean
                    high = 12,
                    # lower mean
                    low = 7,
                    # upper sd
                    sigma.high = 4,
                    # lower sd
                    sigma.low = 1)
# call according to Phil Gregory scheme; DiM.pg prints and returns results,
# plot.DiM draws the posterior curves
inputvalues
dim.res <- DiM.pg(invtyp="pg", inputvalues, print.res=TRUE)
str(dim.res)
plot.DiM(DiM.res=dim.res, filling=TRUE)
################################################################################
# INPUT VALUES
# BASED ON THE METHOD OF U M STUDER
#
# Phil Gregory
# import original values
#
#> print(input.df, right=FALSE, row.names=FALSE)
# No. Standard Deviation Mean Data set
# 12 2.177085 10.31667 riverB.1
# 8 1.279997 8.58750 riverB.2
# 20 2.025593 9.62500 combined
#> print(prior.df, right=FALSE, row.names=FALSE)
# Numerical Example Value
# Prior mean lower bound 7
# Prior mean upper bound 12
# Prior standard deviation lower bound 1
# Prior standard deviation upper bound 4
# Number of steps for plotting p(delta | D_1, D_2, I) 100
# Number of steps for plotting p(r | D_1, D_2, I) 100
#> print(prob.res.df, digits=dig, right=FALSE, row.names=FALSE)
# Hypothesis Probability
# C,S = same mean, same standard deviation 0.09758
# Cbar,S = different means, same standard deviation 0.28915
# C,Sbar = same mean, different standard deviations 0.14429
# Cbar,Sbar = different means, different standard deviations 0.46898
# C = means are the same 0.24187
# Cbar = means are different 0.75813
# S = standard deviations are the same 0.38673
# Sbar = standard deviations are different 0.61327
# C,S = same means and standard deviations 0.09758
# Cbar,Sbar = one or both are different 0.90242
#> print(oddsratio.res.df, digits=dig+1, right=FALSE, row.names=FALSE)
# Hypothesis Odds Ratio
# The odds ratio in favour of a difference (means) 3.1345
# The odds ratio in favour of a difference (standard deviations) 1.5858
# The odds ratio in favour of a difference (means and standard deviations) 9.2481
## Same riverB example expressed with summary statistics only (UMS-style
## input: per-group sd, n, mean, plus prior bounds); ums2pg() converts this
## form to the Phil Gregory input scheme.
umsvalues <- list(snames=c("riverB.1","riverB.2"),
                  si=2.1771, Ni=12,
                  sii=1.28, Nii=8,
                  Di=10.3167, Dii=8.5875,
                  L=7, H=12, sL=1, sH=4,
                  ndelta=1000, nr=1000)
umsvalues
ums2pg(umsvalues)
pgbyums.res <- DiM.pg(invtyp="ums", inputvalues=umsvalues, print.res=TRUE)
pgbyums.res
plot.DiM(DiM.res=pgbyums.res, filling=TRUE)
################################################################################
# INPUT VALUES
# BASED ON THE METHOD OF U M STUDER
#
# UM Studer (1998, p.47)
#
#> print(input.df, right=FALSE, row.names=FALSE)
# No. Standard Deviation Mean Data set
# 15 0.1074000 0.7058800 voluntary
# 16 0.1118400 0.6111100 non-voluntary
# 31 0.1181302 0.6569665 combined
#> print(prior.df, right=FALSE, row.names=FALSE)
# Numerical Example Value
# Prior mean lower bound 0.050
# Prior mean upper bound 0.950
# Prior standard deviation lower bound 0.052
# Prior standard deviation upper bound 0.118
# Number of steps for plotting p(delta | D_1, D_2, I) 100.000
# Number of steps for plotting p(r | D_1, D_2, I) 100.000
#> print(prob.res.df, digits=dig, right=FALSE, row.names=FALSE)
# Hypothesis Probability
# C,S = same mean, same standard deviation 0.20275
# Cbar,S = different means, same standard deviation 0.49970
# C,Sbar = same mean, different standard deviations 0.07608
# Cbar,Sbar = different means, different standard deviations 0.22147
# C = means are the same 0.27883
# Cbar = means are different 0.72117
# S = standard deviations are the same 0.70245
# Sbar = standard deviations are different 0.29755
# C,S = same means and standard deviations 0.20275
# Cbar,Sbar = one or both are different 0.79725
#> print(oddsratio.res.df, digits=dig+1, right=FALSE, row.names=FALSE)
# Hypothesis Odds Ratio
# The odds ratio in favour of a difference (means) 2.58639
# The odds ratio in favour of a difference (standard deviations) 0.42360
# The odds ratio in favour of a difference (means and standard deviations) 3.93224
# The odds ratio in favour of the same (means) 0.38664
# The odds ratio in favour of the same (standard deviations) 2.36074
# The odds ratio in favour of the same (means and standard deviations) 0.25431
# UM Studer 1998, p.47
# call according to UMS scheme
# UMS-style input list for DiM.pg reproducing the UM Studer (1998, p.47)
# example; expected output is shown in the comment block above.
#   si/Ni, sii/Nii : sample standard deviation and size of group 1 / group 2
#   Di/Dii         : sample means of group 1 / group 2
#   L/H, sL/sH     : prior bounds on the mean and standard deviation
#   ndelta/nr      : grid steps for the posterior plots
umsvalues <- list(snames=c("voluntary","non-voluntary"),
si=0.1074, Ni=15,
sii=0.11184, Nii=16,
Di=0.70588, Dii=0.61111,
L=0.05, H=0.95, sL=0.052, sH=0.118,
ndelta=1000, nr=1000)
umsvalues
# Convert to the Gregory scheme, run the analysis, and plot the posteriors.
ums2pg(umsvalues)
ums1.res <- DiM.pg(invtyp="ums", inputvalues=umsvalues, print.res=TRUE)
ums1.res
plot.DiM(DiM.res=ums1.res, filling=TRUE)
################################################################################
# INPUT VALUES
# BASED ON THE METHOD OF U M STUDER
#
# GL Bretthorst 1993, p.189) after Jaynes 1976 + 1983
#
# No. Standard Deviation Mean Data set
# 4 6.480000 50.00000 Jaynes.1
# 9 7.480000 42.00000 Jaynes.2
# 13 7.909937 44.46154 combined
#> print(prior.df, right=FALSE, row.names=FALSE)
# Numerical Example Value
# Prior mean lower bound 34
# Prior mean upper bound 58
# Prior standard deviation lower bound 3
# Prior standard deviation upper bound 10
# Number of steps for plotting p(delta | D_1, D_2, I) 100
# Number of steps for plotting p(r | D_1, D_2, I) 100
#> print(prob.res.df, digits=dig, right=FALSE, row.names=FALSE)
# Hypothesis Probability
# C,S = same mean, same standard deviation 0.1677
# Cbar,S = different means, same standard deviation 0.4157
# C,Sbar = same mean, different standard deviations 0.1069
# Cbar,Sbar = different means, different standard deviations 0.3097
# C = means are the same 0.2746
# Cbar = means are different 0.7254
# S = standard deviations are the same 0.5834
# Sbar = standard deviations are different 0.4166
# C,S = same means and standard deviations 0.1677
# Cbar,Sbar = one or both are different 0.8323
#> print(oddsratio.res.df, digits=dig+1, right=FALSE, row.names=FALSE)
# Hypothesis Odds Ratio
# The odds ratio in favour of a difference (means) 2.64118
# The odds ratio in favour of a difference (standard deviations) 0.71414
# The odds ratio in favour of a difference (means and standard deviations) 4.96299
# The odds ratio in favour of the same (means) 0.37862
# The odds ratio in favour of the same (standard deviations) 1.40028
# The odds ratio in favour of the same (means and standard deviations) 0.20149
# call according to UMS scheme
# UMS-style input list for DiM.pg reproducing the Jaynes (1976/1983) example
# as analysed by GL Bretthorst (1993, p.189); expected output is shown in the
# comment block above.
#   si/Ni, sii/Nii : sample standard deviation and size of group 1 / group 2
#   Di/Dii         : sample means of group 1 / group 2
#   L/H, sL/sH     : prior bounds on the mean and standard deviation
#   ndelta/nr      : grid steps for the posterior plots
inputvalues <- list(snames=c("Jaynes.1","Jaynes.2"),
si=6.48, Ni=4, sii=7.48, Nii=9,
Di=50, Dii=42,
L=34, H=58, sL=3, sH=10,
ndelta=1000, nr=1000)
inputvalues
# Convert to the Gregory scheme, run the analysis, and plot the posteriors.
ums2pg(inputvalues)
jaynes.res <- DiM.pg(invtyp="ums", inputvalues, print.res=TRUE)
jaynes.res
plot.DiM(DiM.res=jaynes.res, filling=TRUE)
|
2d23972940e2d2a5485559e45f65f2ec09cf9bac
|
ec03eebabc6dfb26731404b5263a40c79c13cfbc
|
/Functions_Fonctions/PlotScripts/InfoPlotsFUN/MonthlyNtables.2xls.r
|
6a437d08a8a25caffcb5e334e1a00c8e4c4895dd
|
[] |
no_license
|
martinjeanphd/CABIN_vv_RCBA
|
3887137f09bbce7b7d2dc63e83a68ae23fbeace0
|
05d5efa98406130139008e169e53a8bb65bc6b13
|
refs/heads/master
| 2023-02-09T16:10:04.601531
| 2020-06-23T19:38:55
| 2020-06-23T19:38:55
| 244,471,324
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,377
|
r
|
MonthlyNtables.2xls.r
|
######################################################################################
# FUNCTION TO WRITE LIST OF MONTHLY SUMMARY TABLES to XLS
# -written for FreqTables.FUN.R
# -input=list of dataframes (1 df/summary table per group)
# - writes each Parameter list to a different sheet
######################################################################################
# Write monthly sample-frequency and sample-proportion summary tables to an
# Excel workbook via Windows COM automation (RDCOMClient).
#
# Args:
#   freq.ls  : list of frequency tables, one element per variable/group
#   prc.ls   : parallel list of proportion (%) tables; element names are used
#              as the Excel sheet names (truncated to 29 chars if needed)
#   savedir  : directory the workbook is written to (NOTE: changes the working
#              directory via setwd() and does not restore it afterwards)
#   savename : file-name prefix; output is "<savename>_<stnID>.xlsx"
#   stnID    : station identifier written into each sheet and the file name
#   LANG     : "English" or "French" — controls which default sheet name
#               ("Sheet1"/"Feuil1") is deleted before saving
#
# Windows-only: requires the RDCOMClient package plus the exportVector()/
# exportDataFrame() helpers from omegahat's excelUtils3.R (sourced elsewhere).
MonthlyN.2xls <- function(freq.ls, prc.ls, savedir, savename, stnID, LANG="English"){
#install.packages("RDCOMClient", repos = "http://www.omegahat.org/R")
#require("RDCOMClient")
#source("http://www.omegahat.org/RDCOMClient/examples/excelUtils3.R") #functions for RDCOMClient
#To create a new instance of a registered COM server class, we use the function
#the resulting R objects (ex, word and ie) are references to a generic DCOM object
xls <- COMCreate("Excel.Application")
#we can now move on and start accessing its data (properties) and calling its methods (functions).
#xls[["Visible"]] <- TRUE #opens excel
xls[["Visible"]] <- FALSE #runs without opening?
wb = xls[["Workbooks"]]$Add(1) #opens a new workbook
#function to export from list (each df in list to new sheet with group name)
# Writes one (proportion, frequency) pair to a new worksheet. Closure over
# `wb` and `stnID`; skips entries whose proportion table is NULL.
rdcomexport2 <- function(prc, freq) {
if (!is.null(prc[[1]])){
sh=wb[["Worksheets"]]$Add()
# Excel sheet names are limited to 31 characters; truncate defensively.
if ((nchar(names(prc[1])))<=30){
sh[["Name"]] <- names(prc[1])
}else {sh[["Name"]] <- substring(names(prc[1]), 1, 29)}
# Header cells: station ID and variable name in the top-left corner.
exportVector(as.vector(names(prc[1])), at=sh$Range("B2"))
exportVector(as.vector("Variable:"),at=sh$Range("A2"))
exportVector(as.vector(stnID), at=sh$Range("B1"))
exportVector(as.vector("Station:"),at=sh$Range("A1"))
# Frequency table starting at B6, with row labels in column A.
df.freq<- as.data.frame(freq[[1]])
exportDataFrame(df.freq,at=sh$Range("B6"))
exportVector(rownames(df.freq),at=sh$Range("A7"))
exportVector(as.vector("Sample Frequency by Month"),at=sh$Range("A4"))
# Proportion table in a second panel starting at Q6/P7.
df.prc <- as.data.frame(prc[[1]])
exportDataFrame(df.prc, at = sh$Range("Q6"))
exportVector(rownames(df.prc),at=sh$Range("P7"))
exportVector(as.vector("Sample Proportions(%total samples by Month)"), at=sh$Range("P4"))
}#END IF
}#end function
#apply export function to list
# Single-bracket indexing keeps the element names, which rdcomexport2 uses
# for the sheet name.
PLN <- length(freq.ls)
lapply(1:PLN, function(i) rdcomexport2(prc=prc.ls[i],freq=freq.ls[i]))
#CLEAN UP AND SAVE WORKBOOK:
# Remove the empty default sheet created with the workbook; its name depends
# on the Excel UI language.
if(LANG=="French") {
xls$Sheets("Feuil1")$Delete()
}else if (LANG=="English"){
xls$Sheets("Sheet1")$Delete()
}
# NOTE(review): setwd() below is a persistent side effect on the session.
setwd(savedir)
filename <- paste(getwd(),"/",savename, "_", stnID,".xlsx",sep="")
# Excel's SaveAs wants Windows-style backslash separators.
filename <- gsub("\\/","\\\\",filename)
wb$SaveAs(filename)
wb$Close(filename)
print("Export to excel complete")
}#end function
##########################################################################
#example to write code for df input instead of list (split by variable)
#rdcomexport <- function(df) {
#sh=wb[["Worksheets"]]$Add()
#sh[["Name"]] <- as.character(df$Group[1])
#exportDataFrame(df, at = sh$Range("A1"))
#}
#d_ply(iris,.(Species), rdcomexport) #apply export function to dataframe
##########################################################################
|
2f378b140fd357455f80ba9f25669bbc3bf08693
|
590d0ac331da7be6398818df8c4c2b50fb83d072
|
/cachematrix.R
|
718f4f49f121017a0f541c8c42a4953507d7ca01
|
[] |
no_license
|
Coconuthack/ProgrammingAssignment2
|
48b98549b58ab71246815f38da1d7529de473144
|
72238aab20dc7da8b1327f6898584fe9fc1c5f85
|
refs/heads/master
| 2021-01-24T00:09:24.502180
| 2014-05-25T20:14:55
| 2014-05-25T20:14:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,901
|
r
|
cachematrix.R
|
## The two functions below make matrix computations more efficient by
## providing a way to create a matrix, compute its inverse and cache it for
## future computation
## This function creates a special matrix with space to set and get its cached
## inverse once computed
# Build a matrix wrapper that can memoise its inverse.
#
# Returns a list of accessor functions closing over the stored matrix and a
# cached inverse:
#   set(y, ...)       replace the stored matrix (drops any cached inverse)
#   get()             return the stored matrix
#   setInverse(value) store a computed inverse
#   getInverse()      return the cached inverse, or NULL if none is cached
makeCacheMatrix <- function(x = matrix(), ...) {
  inv <- NULL
  # Allow construction from raw data plus matrix() arguments,
  # e.g. makeCacheMatrix(1, 2, 2) builds a 2x2 matrix of ones.
  x <- matrix(x, ...)

  set <- function(y, ...) {
    x <<- matrix(y, ...)
    inv <<- NULL  # any previously cached inverse is now stale
  }
  get <- function() {
    x
  }
  setInverse <- function(value) {
    inv <<- value
  }
  getInverse <- function() {
    inv
  }

  list(
    set = set,
    get = get,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
## This function solves the inverse of the special matrix and caches it in the original object,
## and returns the cached matrix
# Return the inverse of the special matrix `x` (as built by makeCacheMatrix),
# computing it with solve() on first use and serving the cached copy on
# subsequent calls. Extra arguments `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  # Fast path: reuse a previously computed inverse when one is cached.
  if (!is.null(cached)) {
    message("Retrieving cached data")
    return(cached)
  }
  result <- solve(x$get(), ...)
  x$setInverse(result)  # memoise for future calls
  result
}
|
ec9d95217a2224cb7426b958031afe7a0a72d4f8
|
d1667295bdadc16eed11901d79a4d06cca62fb13
|
/results/Untitled.R
|
cb20b86da328bd9b7b6c5810f8a62c8b417aad60
|
[] |
no_license
|
loneharoon/KDD2017
|
de9c5dd8eafed74aa89622b6041b669a43984ec2
|
a2399f02c23f00c34bfdc9f172e18410b5e780c1
|
refs/heads/master
| 2021-03-27T16:34:33.512063
| 2018-11-02T12:09:05
| 2018-11-02T12:09:05
| 92,056,874
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,785
|
r
|
Untitled.R
|
# Day-ahead rolling neural-network forecast without weather regressors.
#
# Walks forward one day at a time over `test_data`: for each day it predicts
# via compute_Neural_model_withoutweather(), then moves that day from the
# test set into the training set before the next iteration.
#
# Args (assumed from usage — confirm against callers):
#   train_data : xts series of historical observations
#   test_data  : xts series covering the days to be predicted
#   hourwindow : width in hours of the intra-day modelling window
#   daywindow  : number of recent days used for training
#
# Returns the row-bound daily prediction objects.
neuralnetwork_procedure_withoutweather <- function(train_data,test_data,hourwindow,daywindow){
#days <- length(unique(lubridate::day(index(test_data))))
# Number of whole days spanned by the test set (last date minus first date).
days <- as.numeric( last(as.Date(index(test_data))) - first(as.Date(index(test_data))) )
result <- list()
for (i in 1:days) {
# browser()
result[[i]] <- compute_Neural_model_withoutweather(train_data,test_data,hourwindow,daywindow)
testsplit <- split.xts(test_data,"days",k=1)
train_data <- rbind(train_data,testsplit[[1]]) # update train data
# NOTE(review): when only one day remains, `2:length(testsplit)` evaluates
# to 2:1, producing a NULL element that rbind drops — verify the final
# iteration behaves as intended.
test_data <- do.call(rbind, testsplit[2:length(testsplit)])# update test data
}
#browser()
finresult <- do.call(rbind,result)
return(finresult)
}
# Fit per-window averaged neural networks (caret::avNNet) on recent days and
# predict the next test day, without weather covariates.
#
# Flow: (1) split the training series into days; (2) for each intra-day window
# of the most recent day, train an avNNet on scaled lag-day features built by
# create_feature_matrix(); (3) predict the corresponding windows of the first
# test day, clamp negative predictions, and attach prediction bands.
#
# Depends on project helpers defined elsewhere: create_feature_matrix(),
# split_hourwise(), get_selected_nonanomalousdays(), compute_predictionband(),
# plus xts/lubridate/caret. Assumes the series has a `power` column.
compute_Neural_model_withoutweather <- function(train_data,test_days,windowsize,trainingdays) {
#browser()
#get train data acc to test day nature(train or test day)
# train_data <- separate_weekend_weekday(train_data,test_days)
library(caret)
# --- Training phase: use the last day of train_data as the fitting target ---
split_train <- split.xts(train_data,f="days",k = 1)
prac_day <- xts::last(split_train)
temp <- tail(split_train,trainingdays)
# NOTE(review): `1:length(temp)-1` parses as `(1:length(temp)) - 1`, i.e.
# indices 0..n-1; the 0 is silently dropped, so this keeps all but the last
# element. Fragile but functional.
temp <- temp[1:length(temp)-1] # recent seven without last one, already used for testing
xdat <- create_feature_matrix(temp)
# Columns are lag-day features, newest first: D<n> .. D1.
colnames(xdat) <- paste0('D',dim(xdat)[2]:1)
#pdatsplit <- split(prac_day[[1]],lubridate::hour(index(prac_day[[1]])))
pdatsplit <- split_hourwise(prac_day[[1]],windowsize)
#browser()
hhmodels <- list()
# One model per intra-day window.
for (i in 1:length(pdatsplit)) {
#browser()
testinterval <- pdatsplit[[i]]
#next lines ensures that all values are not same otherwise scale function fails
#testinterval$temperature <- testinterval$temperature + rnorm(NROW(testinterval),0.01,0.01)
#testinterval$humidity <- testinterval$humidity + rnorm(NROW(testinterval),0.01,0.01)
# Restrict features to the rows whose hour falls inside this window.
temp <- xdat[lubridate::hour(xdat) %in% unique(lubridate::hour(testinterval))]
temp <- subset(temp,select = -D1) # temporary fix. otherwise code creates sometimes problems
# browser()
stat <- apply(temp,2,function(x) length(unique(x))==1) # remove columns which have same values -> result in NA at scale()
temp <- temp[,!stat]
temp_N_anom <- get_selected_nonanomalousdays(temp)
temp_N_anom <- temp_N_anom[,(dim(temp_N_anom)[2]):(dim(temp_N_anom)[2] - 5)] # USING ONLY 5 DAYS FOR TRAINING
# Regressors (lag days) plus the target window's observations.
datas <- cbind(coredata(temp_N_anom),coredata(testinterval))
datas <- as.data.frame(datas)
datas_temp <- scale(datas[,!colnames(datas)%in% c("power")]) # scaling only regressors
# datas_temp <- cbind(datas_temp,power=datas$power)# combining regressor and predictor
#modelformula <- as.formula(log(power) ~ D1 + D2 + D3 +D4 + temperature + humidity)
# Averaged ensemble of neural nets; linout=TRUE for regression output.
hhmodels[[i]] <- avNNet(x = datas_temp, y = datas$power, size = 10, decay = 0.05,
linout = TRUE, maxit = 500)
}
print("NN training done")
# NOW PREDICT FOR ACTUAL TEST DAY
# --- Prediction phase: rebuild features including the most recent day ---
ydat <- split.xts(test_days,"days",k=1)[[1]] # data of current test day only
temp2 <- tail(split_train,trainingdays)
xdat2 <- create_feature_matrix(temp2)
colnames(xdat2) <- paste0('D',dim(xdat2)[2]:1)
#ydatsplit <- split(ydat,lubridate::hour(index(ydat)))
ydatsplit <- split_hourwise(ydat,windowsize)
#browser()
pred_value <- list()
for (i in 1:length(ydatsplit)) {
#browser()
testinterval2 <- ydatsplit[[i]]
#testinterval2$temperature <- testinterval2$temperature + rnorm(NROW(testinterval2),0.01,0.01)
#testinterval2$humidity <- testinterval2$humidity + rnorm(NROW(testinterval2),0.01,0.01)
temp3 <- xdat2[lubridate::hour(xdat2) %in% unique(lubridate::hour(testinterval2))]
stat2 <- apply(temp3,2,function(x) length(unique(x))==1) # remove columns which have same values -> result in NA at scale()
temp3 <- temp3[,!stat2]
temp3_N_anom <- get_selected_nonanomalousdays(temp3) # removing anomalous days
temp3_N_anom <- temp3_N_anom[,(dim(temp3_N_anom)[2]):(dim(temp3_N_anom)[2] - 5)] #USING ONLY 5 DAYS FOR TRAINING
datas2 <- cbind(coredata(temp3_N_anom),coredata(testinterval2))
datas2 <- as.data.frame(datas2)
datas2_copy <- datas2 # replica used afterwards
datas_temp2 <- scale(datas2[,!colnames(datas2)%in% c("power")]) # scaling only regressors
# datas_temp2 <- cbind(datas_temp2,power=datas2$power)# combining regressor and predictor
# browser()
# Progress indicator: last timestamp of the window being predicted.
print(last(index(testinterval2)))
pred_value[[i]] <- predict(hhmodels[[i]], newdata = datas_temp2)
# Clamp negative predictions to a small positive jittered floor.
# NOTE(review): rnorm() here makes results non-deterministic — confirm
# whether that is intended.
pred_value[[i]] <- ifelse(pred_value[[i]]<0,100+rnorm(1,2,2),pred_value[[i]])
bands <- compute_predictionband(datas2_copy,pred_value[[i]],2) #computing prediction bands
pred_value[[i]] <- cbind(fit=pred_value[[i]],bands)
}
# Reassemble per-window predictions into one xts indexed like the test day.
pred_energy <- xts(do.call(rbind,pred_value),index(ydat))
#pred_energy <- xts(unlist(pred_value),index(ydat))
return(pred_energy)
}
|
c83b2a44d5d074f269c6940e3a0b0902d522ce80
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/languageR/examples/xylowess.fnc.Rd.R
|
70fbcfec87b432bbca3626b988d688369f72ba04
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 342
|
r
|
xylowess.fnc.Rd.R
|
# Auto-extracted example script for languageR::xylowess.fnc (Trellis
# scatterplot with lowess smoothers). The example body is wrapped in a
# "Not run" guard, so executing this file only attaches the package.
library(languageR)
### Name: xylowess.fnc
### Title: Trellis scatterplot with smoothers
### Aliases: xylowess.fnc
### Keywords: regression
### ** Examples
## Not run:
##D data(weightRatings)
##D xylowess.fnc(Rating ~ Frequency | Subject, data = weightRatings,
##D    xlab = "log Frequency", ylab = "Weight Rating")
## End(Not run)
|
8291eba2bc0f286afe88d6076e97a4ba5c2473a3
|
070dceea65adaa4b5b14f0eea4f1839b519acedf
|
/functions/calc_LUE_smith.R
|
922105853893420aa7ce7aeca2b78a77b7981db7
|
[] |
no_license
|
Shirleycwj/LUE_CO2
|
c9f2023edb345eec64b3a4d060b19cb4498ef575
|
78edb4d39481773604936253cf8a2266c4f3f37b
|
refs/heads/master
| 2023-04-12T21:36:34.505940
| 2021-04-21T20:10:51
| 2021-04-21T20:10:51
| 358,309,929
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 602
|
r
|
calc_LUE_smith.R
|
# Compute light-use efficiency (LUE) as in Smith et al. (2019).
#
# Args:
#   cao   : ambient CO2 concentration in ppm (converted to partial pressure)
#   temp  : temperature, forwarded to the photosynthesis helpers
#   vpd   : vapour pressure deficit, forwarded to cal_chi()
#   z     : elevation, used to derive atmospheric pressure via cal_patm()
#   theta : curvature parameter of the light-response function
#   c_s   : cost parameter forwarded to calc_omega()
#
# Returns the scalar/vector LUE value m * omega_star / (8 * theta).
#
# Relies on project helpers cal_patm(), cal_gammastar_pa(), cal_chi() and
# calc_omega() defined elsewhere in this package.
#
# NOTE(review): the original also computed cal_K_pa(), cal_etastar() and a
# vpd-to-Pa conversion whose results were never used; those dead computations
# were removed — confirm the helpers are side-effect free.
calc_LUE_smith <- function(cao, temp, vpd, z, theta, c_s) {
  patm <- cal_patm(z)                       # atmospheric pressure at z (Pa)
  ca <- cao * 1e-6 * patm                   # ambient CO2 partial pressure (Pa)
  gamma_star <- cal_gammastar_pa(temp, z)   # CO2 compensation point (Pa)
  chi <- cal_chi(temp, z, vpd, cao)         # ci/ca ratio
  ci <- chi * ca                            # leaf-internal CO2 (Pa)
  # m term of the LUE model (Smith et al. 2019)
  m <- (ci - gamma_star) / (ci + 2 * gamma_star)
  omega <- calc_omega(theta, c_s, m)
  omega_star <- 1 + omega - sqrt((1 + omega)^2 - 4 * theta * omega)
  (m * omega_star) / (8 * theta)
}
|
aa1551995e9f1f8bddc0d29588bfe35d18b69c95
|
aa3c74e4fd4c3865dc102e8b1324d6d9aa43a26d
|
/R/rtwibble.R
|
5f733f5c663258e5b82dc45c2e93ac1123631cfb
|
[] |
no_license
|
mkearney/rtw
|
9b38f4282b4f61e4347cbcdbe53ac20ae5fb63cc
|
2e113bd17a5fe100cac9b39b0b7d7fde4484807c
|
refs/heads/main
| 2023-08-25T14:40:24.091921
| 2021-11-05T18:06:02
| 2021-11-05T18:06:02
| 420,239,950
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,353
|
r
|
rtwibble.R
|
add_class <- function(x, ...) unique(c(..., class(x)))
# Coerce `x` to a plain data.frame and tag it with the "rtwibble" class.
# NULL or zero-row input yields an empty rtwibble.
as_rtwibble <- function(x = NULL) {
  if (is.null(x) || NROW(x) == 0) {
    x <- data.frame()
  }
  x <- as.data.frame(x, row.names = NULL, stringsAsFactors = FALSE)
  # "rtwibble" goes first in the class vector; unique() guards against
  # double-tagging an object that is already an rtwibble.
  structure(x, class = unique(c("rtwibble", class(x))))
}
# Return the first `n` rows of a data frame (never more rows than it has);
# non-data-frame input is passed through unchanged.
hd <- function(x, n = 10) {
  if (!is.data.frame(x)) {
    return(x)
  }
  keep <- min(NROW(x), n)
  x[seq_len(keep), , drop = FALSE]
}
#' Print method for `rtwibble` objects
#'
#' Prints a compact summary: a dimension header, then (for tweet-shaped data)
#' the first `n` rows of `created_at` / `screen_name` / `text` with the text
#' column truncated to fit the console, followed by the remaining column
#' names abbreviated and laid out in four padded columns.
#'
#' @param x An `rtwibble` data frame.
#' @param n Maximum number of rows to display (default 10).
#' @param ... Ignored; present for S3 consistency with `print`.
#' @return `x`, invisibly.
#' @export
print.rtwibble <- function(x, n = 10, ...) {
cat("# rtwibble (", NROW(x), " x ", NCOL(x), ")\n", sep = "")
if (NROW(x) == 0) {
return(invisible())
}
# Fallback: data without the core tweet columns gets a plain head() print.
if (!all(c("text", "screen_name", "created_at") %in% names(x))) {
p <- hd(x, n = n)
print.data.frame(p)
return(invisible(x))
}
# Abbreviate column names for the footer listing (favs/flw/fds/act_/..),
# collapse families of related columns to "prefix_..", and drop noisy ones.
vars <- names(x)
vars <- sub("favou?rites?", "favs", sub("followers", "flw", sub("friends", "fds", vars)))
vars <- sub("account_", "act_", sub("account_created", "act_crt", sub("_count", "_cnt", vars)))
vars <- unique(sub("(?<=quoted|reply_to|retweet|mentions|place|country|profile)_.*",
"_..", vars, perl = TRUE))
vars <- grep("_expanded|t\\.co|_type|coords_coords|bbox|protected|display_text", vars, invert = TRUE, value = TRUE)
vars <- unique(sub("_url", "", vars))
vars <- paste("*", vars)
# Pad the list to a multiple of 4 so it splits evenly into four columns.
if (length(vars) %% 4 == 1) {
vars <- c(vars, "", "", "")
}
if (length(vars) %% 4 == 2) {
vars <- c(vars, "", "")
}
if (length(vars) %% 4 == 3) {
vars <- c(vars, "")
}
v1 <- vars[seq_len(length(vars) / 4)]
v2 <- vars[(length(vars) / 4 + 1):(length(vars) / 4 * 2)]
v3 <- vars[(length(vars) / 4 * 2 + 1):(length(vars) / 4 * 3)]
# NOTE(review): v4 is derived by set membership rather than position, which
# could mis-size it if an entry appears in more than one column — verify.
v4 <- vars[!vars %in% v1 & !vars %in% v2 & !vars %in% v3]
# Right-pad each column with spaces so the four columns align.
chars <- max(nchar(v1)) - nchar(v1)
v1 <- paste0(v1, sapply(chars, function(.x) paste(rep(" ", .x), collapse = "")))
chars <- max(nchar(v2)) - nchar(v2)
v2 <- paste0(v2, sapply(chars, function(.x) paste(rep(" ", .x), collapse = "")))
chars <- max(nchar(v3)) - nchar(v3)
v3 <- paste0(v3, sapply(chars, function(.x) paste(rep(" ", .x), collapse = "")))
vars <- paste(v1, v2, v3, v4)
vars <- paste(vars, collapse = "\n")
#vars <- strwrap(paste(sort(vars), collapse = ";"), getOption("width"))
# Body: first n rows of the three core columns, text trimmed to fit.
p <- hd(x[, names(x) %in% c("created_at", "screen_name", "text")], n = n)
# NOTE(review): this first assignment is immediately overwritten by the
# next line — dead code, presumably left over from an edit.
w <- getOption("width", 80)
w <- max(c(getOption("width", 80), 80))
# Budget for the text column: console width minus fixed columns.
w <- w - (52 + max(nchar(p[["screen_name"]])))
dots <- nchar(p[["text"]]) > w
p[["text"]] <- substr(p[["text"]], 1, w)
# Shrink further for strings estimated (via calc_width) to render wide,
# e.g. emoji/CJK-heavy tweets; never below 5 characters.
wdts <- calc_width(p[["text"]])
wdts[wdts <= 0.125] <- 0
w <- ceiling(w - (w * wdts * 2.4))
w[w < 5] <- 5
p[["text"]] <- substr(p[["text"]], 1, w)
p[["text"]] <- ifelse(dots, paste0(p[["text"]], "..."), p[["text"]])
p[["..."]] <- "."
print.data.frame(p)
cat(" ...\n", sep = "")
cat("Other variables:", vars, fill = TRUE)
invisible(x)
}
#' @importFrom utils object.size
# Estimate how "wide" each string renders relative to its character count,
# used by print.rtwibble() to shrink the text column for strings containing
# wide glyphs. The estimate is an empirical regression over character count,
# object size in bytes, and log1p of the non-ASCII character count (from the
# sibling helper count_unicode()).
# NOTE(review): the hard-coded coefficients appear to come from an offline
# model fit; their provenance is not recorded here.
calc_width <- function(x) {
char <- nchar(x)
uncd <- log1p(count_unicode(x))
size <- sapply(x, utils::object.size, USE.NAMES = FALSE)
-2.469e-03 * char +
1.479e-03 * size +
-1.329e-02 * uncd +
2.325e-05 * (char^2) +
-6.766e-06 * (char * size) +
-5.000e-05 * (char * uncd)
}
# Count the non-ASCII characters in each element of a character vector by
# substituting every non-ASCII character with a sentinel token and counting
# token occurrences.
count_unicode <- function(x) {
  ascii <- iconv(x, 'utf-8', 'ascii', sub = '<UNICODE>')
  hits <- gregexpr("<UNICODE>", ascii)
  counts <- lengths(hits)
  # gregexpr() signals "no match" with a single -1, which lengths() reports
  # as 1; map those entries back to 0 (a genuine single match stays 1).
  for (idx in which(counts == 1)) {
    if (hits[[idx]][1] < 0) {
      counts[idx] <- 0
    }
  }
  counts
}
|
a99340051d38425e6c4ac6fddab82093c9bf8a96
|
96d1e3eb80e81f6a29eac4df65151f3b17c0ccdd
|
/R/decon.primary.R
|
b3d407e634691c6ba9fcaec930d884d58bd2048f
|
[] |
no_license
|
donaldtmcknight/microDecon
|
83ba8e9fd722169214398fc2cd2a499bbd3c0091
|
85cb9ab41124cad1fca311ce9e2b603218b73ba6
|
refs/heads/master
| 2021-06-15T03:18:32.684712
| 2021-02-19T01:44:26
| 2021-02-19T01:44:26
| 162,779,976
| 12
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,604
|
r
|
decon.primary.R
|
# Decontaminate sample OTU counts using the regression-1 approach of the
# microDecon method.
#
# Input: a data frame whose first column is an OTU identifier, second column
# is the blank's counts, and remaining column(s) are sample counts. Only OTUs
# that amplified in the blank (blank > 0) are adjusted; the rest are appended
# back unchanged. Returns the corrected data frame with columns renamed to
# "OTU", 1, 2, ...
#
# NOTE(review): relies on a helper prop.trans() that is not defined in this
# file — presumably a simple proportion transform defined elsewhere in the
# package; confirm before reusing in isolation.
#decontaminates sample based on regression 1
#Takes a data frame of only three columns (ID, blank, sample)
decon.regress1<- function(contamination){
colnames(contamination)[1:2] <- c("OTU","blank")
# Restrict to OTUs that amplified in the blank; only these can be
# contamination carried over from the blank.
cont <- subset(contamination, blank > 0)
# Keep the OTU labels separately ...
labels <- cont[,1]
# ... and work on the unlabeled numeric columns (blank + samples).
cont.unlabeled <- cont[,2:ncol(contamination)]
# Only run the regression when the sample column has any reads at all;
# otherwise the correction falls back to the raw sample counts (else branch).
if(sum(cont.unlabeled[,2])>0){
# Proportion transform with a 10% inflation of the denominator.
prop.trans2 <- function(x){x/(sum(x)+(0.1*sum(x)))}
# Proportions for the blank (prop.trans, defined elsewhere) and the sample.
cont.prop <- cbind.data.frame(prop.trans(cont.unlabeled[,1]),prop.trans2(cont.unlabeled[,2]))
# Percent difference of each OTU's sample proportion from its blank
# proportion; large positive values indicate likely pure contaminants.
perc.dif <- (cont.prop[,1]-cont.prop)/cont.prop[,1]
sample.labeled.i <- cbind.data.frame(labels,perc.dif[,2])
colnames(sample.labeled.i) <- c("OTU","perc_dif")
# Sort by percent difference and drop degenerate values (NaN from 0/0,
# exactly 1, and -Inf).
sample.labeled.i <- sample.labeled.i[order(sample.labeled.i$perc_dif,decreasing=T),]
sample.labeled.i <- sample.labeled.i[sample.labeled.i$perc_dif != "NaN",]
sample.labeled.i <- sample.labeled.i[sample.labeled.i$perc_dif != 1,]
sample.labeled.i <- sample.labeled.i[sample.labeled.i$perc_dif != -Inf,]
# Rank the remaining OTUs by percent difference (descending).
r <- sample.labeled.i[,2]
r <- rank(r)
perc.diff.i <- cbind.data.frame(r,sample.labeled.i)
perc.diff.i <- perc.diff.i[order(perc.diff.i$r,decreasing=T),]
perc.diff.i <- perc.diff.i[,2:3]
# Count OTUs with perc_dif > 0.1, then pick an index via the empirical
# regression below (coefficients from the microDecon calibration),
# clamped to a valid row of perc.diff.i.
otuzero.i <- sample.labeled.i[,2]
otuzero.i <- length(otuzero.i[otuzero.i > 0.1])
otuzero3.i <- (0.7754*otuzero.i)-4.2185
otuzero3.i <- round(otuzero3.i)
if(otuzero3.i <= 0){otuzero3.i <- 1}
if(otuzero3.i > nrow(perc.diff.i)){otuzero3.i <- nrow(perc.diff.i)}
if(otuzero3.i <= 0){otuzero3.i <- 1}
# The selected OTU serves as the reference contaminant: scale every OTU's
# correction by its blank-count ratio to the reference OTU.
otuzero3.i <- perc.diff.i[otuzero3.i,1]
otuzero3.blank.i <- cont[cont$OTU == otuzero3.i,2]
otuzero3.blank.ratios.i <- cont[,2]/otuzero3.blank.i
otuzero3.correction.i <- cont[cont$OTU == otuzero3.i,3]
otuzero3.correction.i <- otuzero3.blank.ratios.i*otuzero3.correction.i}else{otuzero3.correction.i <- cont.unlabeled[,2]}
mean.i <- otuzero3.correction.i
# Subtract the per-OTU correction from all sample columns, floor at zero,
# round to whole reads, and reattach labels and blank counts.
corrected <- cont[,3:ncol(cont)]-mean.i
corrected[corrected < 0] <- 0
corrected <- round(corrected)
corrected <- cbind.data.frame(labels,cont[,2],corrected)
colnames(corrected) <- colnames(cont)
# Append the untouched OTUs that never amplified in the blank.
corrected <- rbind.data.frame(corrected,subset(contamination,blank==0))
colnames(corrected) <- c("OTU",c(1:(ncol(corrected)-1)))
corrected}
|
26e75b2d3d6c3060c7af09aada564abb74dd1038
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/gofCopula/examples/tests11.Rd.R
|
98f9d70cb1289ba8149feceb224ed279c765f059
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 253
|
r
|
tests11.Rd.R
|
# Auto-extracted example script for gofCopula::gofPIOSRn (goodness-of-fit
# test based on the in-and-out-of-sample approach): runs the test for a
# normal copula on two columns of the bundled IndexReturns data with M = 10
# bootstrap repetitions.
library(gofCopula)
### Name: gofPIOSRn
### Title: 2 and 3 dimensional gof test based on the in-and-out-of-sample
###   approach
### Aliases: gofPIOSRn
### ** Examples
data(IndexReturns)
gofPIOSRn("normal", IndexReturns[c(1:100),c(1:2)], M = 10)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.