blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b1d43a8caf26d2204ecea8b113e0f9dd7021fb24 | 9923e30eb99716bfc179ba2bb789dcddc28f45e6 | /openapi-generator/r/tests/testthat/test_dvir_base_next_driver_signature.R | e89301d4f8d1346dfd92c2545e4fb1812eb5923e | [] | no_license | silverspace/samsara-sdks | cefcd61458ed3c3753ac5e6bf767229dd8df9485 | c054b91e488ab4266f3b3874e9b8e1c9e2d4d5fa | refs/heads/master | 2020-04-25T13:16:59.137551 | 2019-03-01T05:49:05 | 2019-03-01T05:49:05 | 172,804,041 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,635 | r | test_dvir_base_next_driver_signature.R | # Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
# Unit-test scaffold for the DvirBaseNextDriverSignature model (generated by
# openapi-generator). One test_that() block per model property; every
# expectation is still commented out pending fixture values, so each test
# currently registers as empty.
context("Test DvirBaseNextDriverSignature")
# Shared model instance used by all property tests below.
# NOTE(review): DvirBaseNextDriverSignature must be loaded by the package
# under test before this file runs -- confirm via the testthat setup.
model.instance <- DvirBaseNextDriverSignature$new()
test_that("driverId", {
# tests for the property `driverId` (integer)
# ID of the driver who signed the DVIR
# uncomment below to test the property
#expect_equal(model.instance$`driverId`, "EXPECTED_RESULT")
})
test_that("name", {
# tests for the property `name` (character)
# The name of the driver who signed the next DVIR on this vehicle.
# uncomment below to test the property
#expect_equal(model.instance$`name`, "EXPECTED_RESULT")
})
test_that("signedAt", {
# tests for the property `signedAt` (integer)
# The time in millis when the next driver signed the DVIR on this vehicle.
# uncomment below to test the property
#expect_equal(model.instance$`signedAt`, "EXPECTED_RESULT")
})
test_that("type", {
# tests for the property `type` (character)
# Type corresponds to driver.
# uncomment below to test the property
#expect_equal(model.instance$`type`, "EXPECTED_RESULT")
})
test_that("email", {
# tests for the property `email` (character)
# Email of the driver who signed the next DVIR on this vehicle.
# uncomment below to test the property
#expect_equal(model.instance$`email`, "EXPECTED_RESULT")
})
test_that("username", {
# tests for the property `username` (character)
# Username of the driver who signed the next DVIR on this vehicle.
# uncomment below to test the property
#expect_equal(model.instance$`username`, "EXPECTED_RESULT")
})
|
615d194f544ddd2659ffe660907ed760a77e613e | c33146c58a091216567a764332944909c25b6988 | /man/hier.part.internal.Rd | 46a8149219896ed2e37d86bbe20c1b0e0877a475 | [] | no_license | cran/hier.part | 116629f4dfb5d5e9c79d92c142b24b2c314b88bf | 79f96670a1750a659eb2dbfd829c45e89cdb848c | refs/heads/master | 2021-06-01T19:42:27.836337 | 2020-03-03T06:10:03 | 2020-03-03T06:10:03 | 17,696,643 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 196 | rd | hier.part.internal.Rd | \name{hier.part.internal}
\alias{combos1}
\alias{current.model}
\title{
Internal functions for hier.part package
}
\description{
Internal functions for hier.part package
}
\keyword{internal}
|
533f043d4d03d820ed5a645c469fae7e58c1cc41 | 1fafd1d1145f11c8ab3ca38aff4efecea1ad1d11 | /EDA Case Study/Uber Supply-Demand Gap.R | 68a05b72e3b3af6f34e11891005d3f805c99757e | [] | no_license | vijayapotturu/IIITB-PGDDA | c6861f94641dd991f817cf5575bd02700eff2324 | 37d6e86e34f4e73caa5a0eb9a414bbbf1995d9de | refs/heads/master | 2020-04-04T09:04:52.353185 | 2019-02-13T02:12:14 | 2019-02-13T02:12:14 | 155,806,072 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,396 | r | Uber Supply-Demand Gap.R | # creating & setting working directory
# setwd(file.path("/Users/raj/Documents/Vijaya/Upgrad/Course 2/", "Uber Case Study"))
# getwd()
# loading relevant packages
library(stringr)
library(readr)
library(tidyr)
library(dplyr)
library(ggplot2)
library(grid)
library(gridExtra)
library(lubridate)
# Loading Data
uber_req <- read.csv("Uber Request Data.csv", stringsAsFactors = FALSE)
# 1.Analysing&Understanding the Data
# looking at data
str(uber_req)
head(uber_req)
nrow(uber_req)
summary(uber_req)
# There are 6745 records with 6 variables with 5 days of data from Uber between July 11 2016 to July 16 2016
# Request.id is unique column in the dataset.
# Pickup point will be either City/Airport .there are no uppercase and lower case issues.
# There are 2650 are NA's in Driver id .
# Status is either Cancelled/No Cars Available.there are no uppercase and lower case issues.
# Request.timestamp is having differant data time formats.
# Drop.timestamp is having differant data time formats.NA Records exists in the dataset.
# 1.1.checking for blanks
sapply(uber_req, function(x) length(which(x == ""))) # checking for blank "" values
# 1.2. look for duplicate values in the Request Id's.
#duplicated(uber_req$Request.id)
sum(duplicated("uber_req$Request id")) # no Duplciate Request id's
# 1.3. Missing values
sum(is.na(uber_req)) #6564 NA
summary(uber_req)
sum(is.na(uber_req$Request.timestamp))
# counting NA's in Driver id.
sum(is.na(uber_req$Driver.id)) # 2650 Na's in Driver Id.
# counting NA's in Drop time.
sum(is.na(uber_req$Drop.timestamp)) # 3914 NA's in Drop time so these records are for status "Cancelled" and "No Cars Available".
# finding how many rows by each status to validate 3914 NA rows.
summary(factor(uber_req$Status)) # 3 levels
# Cancelled 1264 + No cars Avilable 2650 is 3914 NA's in Drop time so this is perfectly fine in the dro time stamp.
# 1.4 We need to find any repetitions in the dataset for each column. It seems same drivers have been doing multiple trips.
apply(uber_req,2,function(x) sum(duplicated(x)))
# 1.5 each request is unique and this data is of July 2016
apply(uber_req,2,function(x) length(unique(x)))
# 2.Data Cleaning and Preparation
# Converting Categorical variables as factors
# Request ID,pickup point,status and Driver.ID Needs to change int to "factor" data type
str(uber_req)
uber_req$Pickup.point <- as.factor(uber_req$Pickup.point)
# uber_req$Request.id <- as.factor(uber_req$Request.id)
uber_req$Status <- as.factor(uber_req$Status)
uber_req$Driver.id <- as.factor(uber_req$Driver.id)
str(uber_req)
# Data Cleaning - Date formats
# Standardizing date fomat separator form "/" to "-" and removing seconds as this granular level analysis is not required
uber_req$Request.timestamp = str_replace_all(uber_req$Request.timestamp,"\\/","-")
uber_req$Drop.timestamp = str_replace_all(uber_req$Drop.timestamp,"\\/","-")
# converting into standard R date-time format
uber_req$Request.timestamp<-parse_date_time(uber_req$Request.timestamp,orders=c("%d-%m-%Y %H:%M","%d/%m/%Y %H:%M:%S"), exact = FALSE)
uber_req$Drop.timestamp<-parse_date_time(uber_req$Drop.timestamp,orders=c("%d-%m-%Y %H:%M","%d/%m/%Y %H:%M:%S"), exact = FALSE)
str(uber_req) # Verifying the date and time column formats
# Derive new variables on Date and Time
# Extracting hour of day from Request time and Drop time
# creating separate columns from the date & time: day, month, year, hours and minutes
uber_req$Request.timestamp1 <- uber_req$Request.timestamp
uber_req$Drop.timestamp1 <- uber_req$Drop.timestamp
uber_req <- separate(data = uber_req, col = "Request.timestamp1", into = c("req.date","req.time"), sep = " ")
uber_req <- separate(data = uber_req, col = "Drop.timestamp1", into = c("drop.date","drop.time"), sep = " ")
uber_req$request_day <- format(uber_req$Request.timestamp, "%d")
# uber_req1$request_month = format(uber_req1$Request.timestamp, "%m")
# uber_req1$request_year = format(uber_req1$Request.timestamp, "%Y")
uber_req$Req.hrs <- format(uber_req$Request.timestamp, "%H")
# uber_req1$Req.minutes <- format(uber_req1$Request.timestamp, "%M")
uber_req$drop_day <- format(uber_req$Drop.timestamp, "%d")
uber_req$drop.hrs <- format(uber_req$Drop.timestamp, "%H")
#Converting Request_day,Req.hrs,drop_day,drop.hrs as factor
uber_req$Req.hrs <- as.numeric(uber_req$Req.hrs)
str(uber_req)
#for time slots variable,Assumiptions are
# 00 to 05 as Early Morning,
# great than or equal to 05 to 9 as Morning,
# great than or equal to 10 to 12 (12 PM) as before noon,
# great than or equal to 12(12 PM) to 17 (5PM) as afternoon,
# great than or equal to 17(5 PM) to 21(9PM) as Evening,
# great than or equal to 21(9 PM) as Late Evening
uber_req$Time_Slot[uber_req$Req.hrs >= 00 & uber_req$Req.hrs < 5] <- c("00-4AM")
uber_req$Time_Slot[uber_req$Req.hrs >= 05 & uber_req$Req.hrs <= 09] <- c("05-9AM")
uber_req$Time_Slot[uber_req$Req.hrs >= 10 & uber_req$Req.hrs < 14] <- c("10AM-2PM")
uber_req$Time_Slot[uber_req$Req.hrs >= 14 & uber_req$Req.hrs < 17] <- c("14-17PM")
uber_req$Time_Slot[uber_req$Req.hrs >= 17 & uber_req$Req.hrs <= 21] <- c("17-21PM")
uber_req$Time_Slot[uber_req$Req.hrs > 21] <- c("21 - 23PM")
# Verifying the count hourly to time slot
summary(factor(uber_req$Time_Slot))
summary(factor(uber_req$Req.hrs))
uber_req$Time_Slot <- as.factor(uber_req$Time_Slot)
uber_req$Req.hrs <- as.factor(uber_req$Req.hrs)
uber_req$request_day <- as.factor(uber_req$request_day)
uber_req$drop_day <- as.factor(uber_req$drop_day)
uber_req$drop.hrs <- as.factor(uber_req$drop.hrs)
str(uber_req)
# Sorting Data based on Driver.id and Request Time
uber_req <- uber_req[order(uber_req$Driver.id, uber_req$Request.timestamp),]
# Task2 : Data Analysis
# Univariate and segmented analysis
# Plots 1 : This plot gives frequencies of requests by pick up points in each status this will help to where is Most problem in either Airport/City.Selected bar chart(stack) based on Bar height we can assess on high level where is most problem .
ggplot(data = uber_req,
mapping = aes(x = Status,fill = Pickup.point)) +
geom_bar(position = "Stack") +
labs(title ="Frequency of Requests by Status,Airport/City",
x= "Booking requests by Status", y= "Count of Requests") +
theme(plot.title = element_text(hjust = 0.5))+
geom_text(aes(y=(..count..),label=(..count..)),size=3,stat='count',position = position_stack(vjust = 0.5))
# Obseravation : Cancelled + No Cars Available from the Airport is higher than City.Trips completed from the city is clearly states it is higher than Airport.Cars Availabilty at Airport seems be major problem.
# Plot 2 : The below plot will identify which are peak hours
# NOTE(review): aes(x = uber_req$Req.hrs) works here but the idiomatic form is
# aes(x = Req.hrs), letting ggplot resolve the column from the data argument.
ggplot(uber_req,
mapping = aes(x = uber_req$Req.hrs,fill = Pickup.point)) +
geom_bar(position = "stack") +
labs(title ="Frequency of Requests by Cabs at Airport/city", x= "Booking requests in a day (hrs)", y= "Count of Requests") +
theme(plot.title = element_text(hjust = 0.5))+
geom_text(aes(y=(..count..),label=(..count..)),size=3,stat='count',position = position_stack(vjust = 0.5))
# Obseravation : Number requests from City to Aiport between 5 Am to 10 AM is high and Airport to City between 17 PM to 21 PM is high.
# Subsets reused by later plots: all requests per pickup point, and the
# completed ("drop") trips per pickup point.
uber_Airport <- uber_req %>%
filter(Pickup.point == "Airport")
uber_Airport_drop <- uber_req %>%
filter(Pickup.point == "Airport" & Status == 'Trip Completed')
uber_City <- uber_req %>%
filter(Pickup.point == "City")
uber_City_drop <- uber_req %>%
filter(Pickup.point == "City" & Status == 'Trip Completed')
# Plot 3: The below plot will identify which status is the problematic
Requests_Airport1 <- ggplot(uber_Airport,
mapping = aes(x = Req.hrs,fill = Status)) +
geom_bar(position = "stack") +
labs(title ="Frequency of Requests by Cabs at Airport", x= "Booking requests in a day (hrs)", y= "Count of Requests") +
theme(plot.title = element_text(hjust = 0.5),legend.position="none")+
geom_text(aes(y=(..count..),label=(..count..)),size=3,stat='count',position = position_stack(vjust = 0.5),check_overlap = TRUE)
Requests_City1 <- ggplot(uber_City,
mapping = aes(x = Req.hrs,fill = Status)) +
geom_bar(position = "stack") +
labs(title ="Frequency of Requests by Cabs at city", x= "Booking requests in a day (hrs)", y= "Count of Requests") +
theme(plot.title = element_text(hjust = 0.5),legend.position="bottom")+
geom_text(aes(y=(..count..),label=(..count..)),size=3,stat='count',position = position_stack(vjust = 0.5),check_overlap = TRUE)
# Airport and City panels stacked vertically for side-by-side comparison.
grid.arrange(Requests_Airport1, Requests_City1, nrow = 2)
# Obseravation : Cars unavailabilty seems to higher than cancellation at Aiport and in City Cancellation are high during the Peak hours.
# 1.Visually identify the most pressing problems for Uber.
# Hint: Create plots to visualise the frequency of requests that get cancelled or show 'no cars available'; identify the most problematic types of requests (city to airport / airport to city etc.) and the time slots (early mornings, late evenings etc.) using plots.
# Plot 1 To visualise frequency of requests & drops to analyse the Peak Hours (city to airport / airport to city)
# Plot 1 :Show the frequency of number of requests& drops by status during each hour of the day by pick up point
ggplot(data = uber_req,
mapping = aes(x = uber_req$Req.hrs,fill = Status)) +
geom_histogram(position = "stack", stat = "count") + facet_wrap(req.date~Pickup.point) +
labs(title ="Frequency of Daily Requests by Cab Status,Airport/City", x= "Booking requests in a day (hrs)", y= "Count of Requests") +
theme(plot.title = element_text(hjust = 0.5))
# Observation :Cancellations and cab unavailability are consistent on all the days. Airport to city or city to Airport cab request were not catered due to either unavailability or cancellations of the cab booking.
# Also Cab unavailability is high for Airport pickup requests and Cancellations are high for City pickup requests
# summary(uber_req$Status)
# Plot 2 :Will Show the frequency of number of request by Cancelled or No cars Available during each hour of the day by pick up point
# Restrict to the unserved requests only for the remaining problem plots.
uber_can_nocars <- uber_req %>%
filter(Status == "Cancelled" | Status == "No Cars Available")
# summary(uber_can_nocars$Status)
# to Display Frequency of requests by Cancelled/"No cars Available" by Date wise in Airport/City
ggplot(uber_can_nocars,
mapping = aes(x = uber_can_nocars$Req.hrs, fill = Status)) +
geom_histogram(position = "dodge", stat = "count") + facet_wrap(req.date ~ Pickup.point) +
labs(title ="Frequency of Requests by Cabs Cancelled/No cars Available at Airport,Date Wise", x= "Booking requests in a day (hrs)", y= "Count of Requests") +
theme(plot.title = element_text(hjust = 0.5))
# Observation : Cancellations are high on most of days from the city and Cars Availability Seems to be the issue at Airport during peak hours most of the days.
#Plot 3 :to Display Frequency of requests by Cancelled/"No cars Available" by time slot by Date wise
ggplot(data = uber_can_nocars,
mapping = aes(x = Time_Slot,fill = Status)) +
geom_histogram(position = "stack", stat = "count") + facet_wrap(~req.date) +
labs(title ="Frequency of Requests by Cab Status by Date", x= "Booking requests by Time Slot", y= "Count of Requests") +
theme(plot.title = element_text(hjust = 0.5)) +
geom_text(aes(y=(..count..),label=(..count..)),size=3,stat='count',position = position_stack(vjust = 0.5))
# Observation : Cancellations are high on most of days from the city and Cars Availability Seems to be the issue at Airport during peak hour time_slots.
# to Display Frequency of requests by Cancelled/"No cars Available",Airport/City
ggplot(data = uber_can_nocars,
mapping = aes(x = Time_Slot, fill = Status )) +
geom_histogram(position = "Stack", stat = "count") + facet_wrap(~Pickup.point) +
labs(title ="Frequency of Requests by Cabs Cancelled/No cars Available,Airport/City", x= "Booking requests by Time Slot", y= "Count of Requests") +
theme(plot.title = element_text(hjust = 0.5)) +
geom_text(aes(y=(..count..),label=(..count..)),size=3,stat='count',position = position_stack(vjust = 0.5))
# Observation : This Chart clearly shows at Airport there are no Cabs Available (between 5PM to 9 PM).More Cancellation from City Between 5 AM to 9 AM.
# 2. Find out the gap between supply and demand and show the same using plots.
# Find the time slots when the highest gap exists
# Find the types of requests (city-airport or airport-city) for which the gap is the most severe in the identified time slots
# Gap Analysis by hourly wise
# Demand and supply
# Assumptions : Demand = number of cab requests made (Trip Completed + Cancelled + No Cars Available).
# Supply = requests actually served; only "Trip Completed" counts as supply.
# Derive a two-level served / not-served flag in one vectorised step.
# FIX: the original assigned a logical column and then patched character
# labels into it, relying on silent logical -> character coercion (and on
# "FALSE" == FALSE comparing TRUE afterwards); ifelse() states the intent
# directly and produces the same labels.
uber_req$Status_Flag <- ifelse(uber_req$Status == "Trip Completed",
"Trip Completed", "Requests not Served")
table(uber_req$Status_Flag) # sanity check: counts per flag level
# Demand Vs Supply by Date Wise
ggplot(uber_req,aes(x=Req.hrs,fill=Status_Flag)) + geom_bar(stat='count',position = "stack")+
ggtitle("Daily Demand for Cabs by Hourly")+ facet_wrap(~req.date) +
labs(x="Time in Hours", y="Number of Cabs Requested")+
labs(fill="Demand Vs Supply")
# Observation : Most of the days requests are not fulfilled during peak hours at both the pick up points.
# Demand Vs Supply by pick up point
ggplot(uber_req,aes(x=Req.hrs,fill=Status_Flag)) + geom_bar(stat='count',position = "stack")+
ggtitle("Hourly Demand for Cabs")+ facet_wrap(~Pickup.point) +
labs(x="Time in Hours", y="Number of Cabs Requested")+
labs(fill="Demand Vs Supply") +
geom_text(aes(y=(..count..),label=(..count..)),size=3,stat='count',position = position_stack(vjust = 0.5))
# Observation : There is a huge gap between demand and supply during peak hours.
# summary(factor(uber_req$drop.hrs))
# summary(factor(uber_req$Req.hrs))
# Gap Analysis by Time slot wise
# Demand Vs Supply by date by time slot
ggplot(uber_req,aes(x=Time_Slot,fill=Status_Flag)) + geom_bar(stat='count',position = "stack")+
ggtitle("Daily Demand for Cabs by time slots")+ facet_wrap(~req.date) +
labs(x="Time slots", y="Number of Cabs Requested")+
labs(fill="Demand Vs Supply")+
geom_text(aes(y=(..count..),label=(..count..)),size=3,stat='count',position = position_stack(vjust = 0.5))
# Demand Vs Supply by pick up point by time slot
ggplot(uber_req,aes(x=Time_Slot,fill=Status_Flag)) + geom_bar(stat='count',position = "stack")+
ggtitle("Hourly Demand & Supply for Cabs by Time slots")+ facet_wrap(~Pickup.point) +
labs(x="Time slots", y="Number of Cabs Requested")+
labs(fill="Demand Vs Supply") +
geom_text(aes(y=(..count..),label=(..count..)),size=3,stat='count',position = position_stack(vjust = 0.5))
# Requests vs completed drops per time slot, one panel per pickup point.
# These reuse the uber_Airport / uber_City subsets built earlier.
Requests_Airport <- ggplot(uber_Airport, aes(x = Time_Slot)) +
geom_bar(stat='count',fill="darkblue") +
labs(title ="Frequency of Requests at Airport", x= "Booking requests in a day (Time_slot)", y= "Count of Requests") +
theme(plot.title = element_text(hjust = 0.5),legend.position="none") +
geom_text(aes(y=(..count..),label=(..count..),colour = "white",fontface = "bold"),size=3,stat='count',position = position_stack(vjust = 0.5))
# summary(uber_Airport$Time_Slot)
Drops_Airport <- ggplot(uber_Airport_drop, aes(x = Time_Slot)) +
geom_bar(stat='count',fill="Orange") +
labs(title ="Frequency of drops at Airport", x= "Drops in a day (Time_slot)", y= "Count of Drops") +
theme(plot.title = element_text(hjust = 0.5),legend.position="none") +
geom_text(aes(y=(..count..),label=(..count..)),size=3,stat='count',position = position_stack(vjust = 0.5))
# summary(uber_Airport_drop$Time_Slot)
Requests_City <- ggplot(uber_City, aes(x = Time_Slot)) +
geom_bar(stat='count',fill="darkblue") +
labs(title ="Frequency of Requests at City", x= "Booking requests in a day (Time_slot)", y= "Count of Requests") +
theme(plot.title = element_text(hjust = 0.5),legend.position="none")+
geom_text(aes(y=(..count..),label=(..count..),colour = "white", fontface = "bold"),size=3,stat='count',position = position_stack(vjust = 0.5))
# summary(uber_City$Time_Slot)
Drops_City <- ggplot(uber_City_drop, aes(x = Time_Slot)) +
geom_bar(stat='count',fill="Orange") +
labs(title ="Frequency of drops at City", x= "Drops in a day (Time_slot)", y= "Count of Drops") +
theme(plot.title = element_text(hjust = 0.5),legend.position="none")+
geom_text(aes(y=(..count..),label=(..count..)),size=3,stat='count',position = position_stack(vjust = 0.5))
# summary(uber_City_drop$Time_Slot)
# 2x2 grid: requests (blue) next to completed drops (orange) for each point.
grid.arrange(Requests_Airport, Drops_Airport,Requests_City, Drops_City, nrow = 2)
# Observation : There is a huge gap between demand and Supply during peak hours.There are two problems identified from the above visualisations: why are more cancellations in the morning slot? Why there are no cabs at Airport between 5PM to 9 PM.
# Calculating the total trip duration in minutes (NA where the trip was
# cancelled or no car was available, since Drop.timestamp is NA there).
# FIX: explicit units = "mins" -- as.numeric(Drop - Request) depends on
# difftime()'s automatically chosen unit, which varies with the data.
uber_req$triptime <- as.numeric(difftime(uber_req$Drop.timestamp,
uber_req$Request.timestamp,
units = "mins"))
# BUG FIX: the original computed mean(!is.na(triptime)) * 60, i.e. the share
# of non-missing trips scaled by 60 -- not an average duration. Average the
# durations themselves over completed trips only.
Average_trip_time <- mean(uber_req$triptime, na.rm = TRUE)
Average_trip_time
# To understand total journey time from Airport/City or City/Airport by hour
ggplot(data = uber_req, mapping = aes(y = triptime, x = Req.hrs, fill = Pickup.point)) +
geom_boxplot() + theme(title = element_text(size=9, face="bold"))+
labs(title ="Total trip Duration pattern in a weekday ",
x= "Booking requests in a day (Hrs)", y = "total Trip time")
# Observation: total trip time is relatively high to Airport in the morning (5 am to 10 am) & high to city in the evening (5 pm to 9 pm).
# This leads to more cancellations towards the Airport by cab drivers, so that they can avoid long waiting time at the airport and make more trips within the city.
# Exporting clean data frame for operations in tableau
write.csv(uber_req, "Uber Request Final.csv", na = "",row.names = FALSE)
|
b3d9554676f968fc392326a34883629ef3eccc4c | f307665431280b3534acaaa954a5a8e20f825225 | /1 - Intro to Data Science/assignment_1/assignment1.R | ac8aa1506883a2350a594e49fdb8e2432facf06f | [] | no_license | jessie-jensen/uw_data_science_cert | c17fd24e0d306a85ee690f4cc1f7d9679e105ad9 | b7aec0ab5f6bac2dd05769e3e4c1189646acc162 | refs/heads/master | 2023-06-27T08:45:46.281722 | 2016-11-02T22:10:37 | 2016-11-02T22:10:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,610 | r | assignment1.R | # 1 Download and Install R. Then download and install R studio. Calculate 2 + 3 in R studio . Type your name into the console. Take a screenshot of R-studio (not just the console) and name the screenshot file: RStudio.jpg or RStudio.png or RStudio.pdf. The format should be jpg, png, or pdf.
'Done'
# 2 Join the LinkedIn group for this course. Introduce yourself, start a discussion, or make a comment on an existing discussion. Write the topic of that discussion in a txt file called discussion.txt
"Favorite R packages?"
# 3 Review the patterns described in DataScience01a.R and use R to get the Indian Liver Patient Dataset from the UCI machine learning repository.
# url <- http://archive.ics.uci.edu/ml/machine-learning-databases/00225/Indian%20Liver%20Patient%20Dataset%20(ILPD).csv # Copy this url carefully
# ILPD <- read.csv(url, header=FALSE, stringsAsFactors=FALSE)
url <- "http://archive.ics.uci.edu/ml/machine-learning-databases/00225/Indian%20Liver%20Patient%20Dataset%20(ILPD).csv"
ILPD <- read.csv(url, header=FALSE, stringsAsFactors=FALSE)
# 4 The following was not covered in class. Get the 11 column headers from this page: http://archive.ics.uci.edu/ml/datasets/ILPD+(Indian+Liver+Patient+Dataset)#
# Manually construct a vector of column headers using this pattern:
# a.headers <- c(<name1>, <name2>, .) # Each column has a name
# Associate names with the dataframe using this pattern:
# a.names(<dataframe>) <- headers
# 1. Age Age of the patient
# 2. Gender Gender of the patient
# 3. TB Total Bilirubin
# 4. DB Direct Bilirubin
# 5. Alkphos Alkaline Phosphotase
# 6. Sgpt Alamine Aminotransferase
# 7. Sgot Aspartate Aminotransferase
# 8. TP Total Protiens
# 9. ALB Albumin
# 10. A/G Ratio Albumin and Globulin Ratio
# 11. Selector field used to split the data into two sets (labeled by the experts)
headers <- c("age","gender","tb","db","alkphos","sgpt","sgot","tp","alb","a_g_ratio","test_set")
names(ILPD) <- headers
# 5 Use head(ILPD) to view the first 6 rows.
head(ILPD)
# 6 Mean, median and standard deviation of each column.
# FIX: restrict the aggregates to numeric columns -- mean()/sd() on the
# character `gender` column only yield NA with a warning (median() can even
# error) -- and pass na.rm = TRUE because `a_g_ratio` contains NA's.
numeric_cols <- ILPD[vapply(ILPD, is.numeric, logical(1))]
lapply(numeric_cols, mean, na.rm = TRUE)
lapply(numeric_cols, median, na.rm = TRUE)
lapply(numeric_cols, sd, na.rm = TRUE)
# 7 Histograms for each column where possible (i.e. each numeric column).
# FIX: seq_len() instead of 1:ncol(); the manual `i = 1` / `i = i + 1`
# counters were dead code (for() manages its own index); is.numeric()
# replaces the class(x) == "integer" | class(x) == "numeric" comparison.
for (i in seq_len(ncol(ILPD))) {
  if (is.numeric(ILPD[[i]])) {
    hist(ILPD[[i]], xlab = colnames(ILPD)[i],
         main = paste("Histogram of", colnames(ILPD)[i]))
  }
}
# 8 Scatterplot matrix overview of the data; gender is non-numeric, so drop it.
library(dplyr)
plot(select(ILPD, -gender))
# 9 Look at the plots from plot(ILPD) and answer:
# (The answers below are deliberately recorded as bare string literals so
# they echo in the console when the script is sourced.)
# 9a What can you say about the data?
'
Total Bilirubin (tb) & Direct Bilirubin (db) seem to be strongly positively correlated with each other.
This also appears to be the case for Total Protiens (tp) and albumin (alb).
Age appears uncorrelated with most features.
'
# 9b How can you tell if a vector contains continuous numbers or binary data?
'Binary number features only appear in the plot with 2 distinct values on the relevant axis'
# 9c How can you tell if two vectors are correlated?
'They are tightly packed and follow a general trend.'
# 10 Remove outliers from the vector and print the result: a value is kept
# only when it lies strictly within two standard deviations of the mean.
x <- c(-1, 1, -1, 1, 1, 17, -3, 1, 1, 3)
# |x - mean| < 2*sd is equivalent to (x > mean - 2*sd) & (x < mean + 2*sd)
noOutliers <- abs(x - mean(x)) < 2 * sd(x)
x[noOutliers]
# 11 Relabel the vector so each category uses its shortest string, then echo it.
x <- c('BS', 'MS', 'PhD', 'HS', 'Bachelors', 'Masters', 'High School', 'BS', 'MS', 'MS')
# named lookup: long form -> short form
longForms <- c("Bachelors" = "BS", "Masters" = "MS", "High School" = "HS")
hit <- x %in% names(longForms)
x[hit] <- unname(longForms[x[hit]])
x
# 12 Min-Max normalize the vector to [0, 1] and print the result.
x <- c(-1, 1, -1, 1, 1, 17, -3, 1, 1, 3)
rng <- range(x)                       # c(min, max) in one pass
normalized <- (x - rng[1]) / (rng[2] - rng[1])
normalized
# 13 Z-score normalize the vector (center on the mean, scale by the
# sample standard deviation) and print the result.
x <- c(-1, 1, -1, 1, 1, 17, -3, 1, 1, 3)
mu <- mean(x)
sigma <- sd(x)
normalized <- (x - mu) / sigma
normalized
# 14 Binarize (one-hot encode) the colour vector: one logical indicator
# column per colour, shown as a matrix in the console.
x <- c('Red', 'Green', 'Blue', 'Green', 'Blue', 'Blue', 'Blue', 'Red', 'Green', 'Blue')
isRed   <- x %in% 'Red'
isGreen <- x %in% 'Green'
isBlue  <- x %in% 'Blue'
cbind(isRed, isGreen, isBlue)
# 15 Discretize the vector into 3 bins of equal range (Low / Middle / High)
# and print the result.
# FIXES vs original: the local variable `range` masked base::range(), the
# all-NA vector was echoed once before labelling (debug residue), and the
# -Inf/+Inf sentinels are unnecessary when the outer bins are open-ended.
x <- c(81, 3, 3, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 9, 12, 23, 24, 25)
binWidth <- (max(x) - min(x)) / 3
bin1Max <- min(x) + binWidth       # upper edge of the "Low" bin
bin2Max <- min(x) + 2 * binWidth   # upper edge of the "Middle" bin
xDiscretized <- rep(NA_character_, length(x))
xDiscretized[x <= bin1Max] <- "Low"
xDiscretized[x > bin1Max & x <= bin2Max] <- "Middle"
xDiscretized[x > bin2Max] <- "High"
xDiscretized
# 16 Discretize the following vector into 3 bins of equal of near equal amounts of numbers. No Code is necessary, just present the results as commented text. c(81, 3, 3, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 9, 12, 23, 24, 25)
x <- c(81, 3, 3, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 9, 12, 23, 24, 25)
# NOTE: ntile() comes from dplyr (loaded earlier via library(dplyr)).
x_thirds <- ntile(x, 3)
x_thirds
# the ntile dplyr function breaks a vector into n different bins of near equal observation size
table(x_thirds)
# 17 Submit to canvas the screenshot from item 1, the txt file from item 2, and an R script that contains the answers to items 3 through 16. Submit by Saturday 11:57 PM to the Homework Submission site on Canvas in the Module called "Lesson 01". The Assignment is called "Assignment 01". If you cannot submit the assignment on time, please notify me before the deadline at ErnstHe@UW.edu and put "Data Science UW 2016 Assignment 01 late" (without quotes) in the email subject line
# 18 Reading assignment
#http://en.wikipedia.org/wiki/Cluster_analysis
#http://en.wikipedia.org/wiki/K-means_clustering
#http://home.deib.polimi.it/matteucc/Clustering/tutorial_html/
#http://www.sqlserverdatamining.com/ArtOfClustering/default.aspx
#19 Look through Preview section of Lesson 01 Overview |
e05951d3fb54a41a77c81e65f7f42afd3e61654b | 7917fc0a7108a994bf39359385fb5728d189c182 | /cran/paws.end.user.computing/man/workspaces_update_connection_alias_permission.Rd | 08e872869ffef9ecaba8632dc852f873da2cb917 | [
"Apache-2.0"
] | permissive | TWarczak/paws | b59300a5c41e374542a80aba223f84e1e2538bec | e70532e3e245286452e97e3286b5decce5c4eb90 | refs/heads/main | 2023-07-06T21:51:31.572720 | 2021-08-06T02:08:53 | 2021-08-06T02:08:53 | 396,131,582 | 1 | 0 | NOASSERTION | 2021-08-14T21:11:04 | 2021-08-14T21:11:04 | null | UTF-8 | R | false | true | 2,144 | rd | workspaces_update_connection_alias_permission.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/workspaces_operations.R
\name{workspaces_update_connection_alias_permission}
\alias{workspaces_update_connection_alias_permission}
\title{Shares or unshares a connection alias with one account by specifying
whether that account has permission to associate the connection alias
with a directory}
\usage{
workspaces_update_connection_alias_permission(AliasId,
ConnectionAliasPermission)
}
\arguments{
\item{AliasId}{[required] The identifier of the connection alias that you want to update
permissions for.}
\item{ConnectionAliasPermission}{[required] Indicates whether to share or unshare the connection alias with the
specified AWS account.}
}
\value{
An empty list.
}
\description{
Shares or unshares a connection alias with one account by specifying
whether that account has permission to associate the connection alias
with a directory. If the association permission is granted, the
connection alias is shared with that account. If the association
permission is revoked, the connection alias is unshared with the
account. For more information, see \href{https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html}{Cross-Region Redirection for Amazon WorkSpaces}.
\itemize{
\item Before performing this operation, call
\href{https://docs.aws.amazon.com/workspaces/latest/api/API_DescribeConnectionAliases.html}{DescribeConnectionAliases}
to make sure that the current state of the connection alias is
\code{CREATED}.
\item To delete a connection alias that has been shared, the shared
account must first disassociate the connection alias from any
directories it has been associated with. Then you must unshare the
connection alias from the account it has been shared with. You can
delete a connection alias only after it is no longer shared with any
accounts or associated with any directories.
}
}
\section{Request syntax}{
\preformatted{svc$update_connection_alias_permission(
AliasId = "string",
ConnectionAliasPermission = list(
SharedAccountId = "string",
AllowAssociation = TRUE|FALSE
)
)
}
}
\keyword{internal}
|
ddf8519f0ea5cb9a614237671655e5f564ed39ad | 11394cd22cea3b4e644d20564ff4b500018d943e | /scripts/step6_split_norm.R | 71c99abeb0875c3e662d242441b5cb16d6e21e9c | [
"MIT"
] | permissive | shunsunsun/single_cell_rna_seq_snakemake | 3d153c9cb7db9988917aff38991217a35940aa64 | f275546eb3bd63d5d535a13407ce47ee36e94cae | refs/heads/master | 2023-04-17T08:44:13.954986 | 2021-04-27T08:13:50 | 2021-04-27T08:13:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,296 | r | step6_split_norm.R | suppressPackageStartupMessages(library(Seurat))
suppressPackageStartupMessages(library(future))
suppressPackageStartupMessages(library(future.apply))
# This script runs under snakemake's `script:` directive, which injects the
# `snakemake` S4 object carrying input/output paths and parameters.
input <- snakemake@input[[1]] ## filtered merged data as a seurat obj
outfile <- snakemake@output[[1]]
outdir <- snakemake@params[["dir"]]
# Make sure both the result directory and the log-file directory exist.
if(!dir.exists(outdir)){
  dir.create(outdir,recursive=T)
}
outdir1 <- dirname(outfile)
if(!dir.exists(outdir1)){
  dir.create(outdir1,recursive=T)
}
# Per-run suffix for output file names, derived from the output directory name.
suffix=gsub("Obj","",basename(outdir))
#It is technically unsound to regress out nUMI and nGene, which scTransform automatically regresses out
if(snakemake@params[["regressNum"]]){
  regVars=c('mitoCountRatio', 'nFeature_RNA', 'nCount_RNA')
}else{
  regVars=c('mitoCountRatio')
}
# Cap the worker count at both the requested thread count and what the
# future backend actually has available.
nworker <- min(as.numeric(snakemake@threads),length(availableWorkers()))
cat(sprintf("Use %d workders\n",nworker))
## Read input
flted_seurat <- readRDS(input)
## Load cell cycle markers
# NOTE(review): load() is assumed to define `s_genes` and `g2m_genes`, which
# are used below -- confirm against the file referenced by params[["ccgenes"]].
load(snakemake@params[["ccgenes"]])
plan("multiprocess", workers = nworker)
# Raise the serialized-globals limit so the Seurat objects can be shipped to workers.
options(future.globals.maxSize = 20*1024^3)
# Split seurat object to perform cell cycle scoring and SCTransform on all samples
split_seurat <- SplitObject(flted_seurat, split.by = "orig.ident")
# Normalize each sample
# Normalize every per-sample Seurat object in parallel. Each sample is rebuilt
# with minimal filtering thresholds, optionally cell-cycle scored, and
# SCTransform-normalized; the result is written to <outdir>/<sample>.<suffix>.rds.
future_lapply(X = split_seurat, future.seed=1129, FUN = function(s) {
  sample <- unique(s@meta.data$orig.ident)
  ## After filtering, the sample-level cell/feature number criteria may no
  ## longer be met, so recreate the object with min.cells/min.features of 1.
  x <- CreateSeuratObject(counts=GetAssayData(object=s, slot='counts'), meta.data=s@meta.data, min.cells=1, min.features=1)
  cat(sprintf("Processing %s with %d features and %d cells", sample, dim(x)[1], dim(x)[2]),file=outfile,append=TRUE,sep="\n")
  # BUG FIX: the error handlers below previously did `x <- SCTransform(...)`,
  # which assigned only inside the handler's own environment; the fallback
  # normalization was silently discarded and the *raw* object got saved.
  # Assigning the tryCatch() return value to x keeps both code paths.
  if(!snakemake@params[["sctPreNorm"]]){
    # Log-normalize first, score cell cycle, then SCTransform regressing out
    # the S-G2M difference alongside regVars.
    x <- tryCatch({
      tmp <- NormalizeData(x)
      tmp <- CellCycleScoring(tmp, g2m.features=g2m_genes, s.features=s_genes)
      tmp$cc_difference <- tmp$S.Score - tmp$G2M.Score
      SCTransform(tmp, variable.features.n=5000, vars.to.regress = c(regVars, 'cc_difference'))
    }, error=function(cond){ ## in case cell cycle scoring has any problem
      cat(sprintf("No cell cycle scoring due to %s",cond),file=outfile,append=TRUE,sep="\n")
      SCTransform(x, variable.features.n=5000, vars.to.regress = regVars)
    })
  }else{
    # SCTransform first, score cell cycle on the SCT assay, then re-run
    # SCTransform with the cell-cycle difference as an extra covariate.
    x <- tryCatch({
      tmp <- SCTransform(x, assay = 'RNA', new.assay.name = 'SCT', vars.to.regress = regVars)
      tmp <- CellCycleScoring(tmp, s.features = s_genes, g2m.features = g2m_genes, assay = 'SCT', set.ident = TRUE)
      tmp$cc_difference <- tmp$S.Score - tmp$G2M.Score
      SCTransform(tmp, assay = 'RNA', new.assay.name = 'SCT', variable.features.n=5000, vars.to.regress = c(regVars, 'cc_difference'))
    }, error=function(cond){
      cat(sprintf("No cell cycle scoring due to %s",cond),file=outfile,append=TRUE,sep="\n")
      SCTransform(x, variable.features.n=5000, vars.to.regress = regVars)
    })
  }
  saveRDS(x, file=paste0(outdir,"/",sample,".",suffix,".rds"))
})
# Restore a modest globals limit and return to sequential execution now that
# the parallel normalization is finished.
options(future.globals.maxSize = 500*1024^2) #500M
plan(sequential)
|
8685d3b748b69d977acc90fae39f80ca4aa7209e | fa41498eb72267e705fcec152f4f93bd293ee0a4 | /OCR/bwbb ocr.R | 83d927f5efd36b27bcbe06d6fb2eaa9ef5d315b6 | [] | no_license | sidxi/week-six | 599fadbd447b9dee6541a442e98652d9b5e4c124 | dd93646bae4b3bd8239cd074ac8e2509b8962fe7 | refs/heads/master | 2022-11-05T20:53:51.709826 | 2020-06-17T04:55:27 | 2020-06-17T04:55:27 | 272,305,365 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 469 | r | bwbb ocr.R | library(magick)
library(magrittr)
library(tesseract)
# OCR one scanned page: upscale to 2000px, convert to grayscale, and trim
# borders before running tesseract. The same pipeline was previously
# copy-pasted once per page; factoring it into a helper removes the
# duplication without changing the output files.
ocr_page <- function(png_path) {
  image_read(png_path) %>%
    image_resize("2000") %>%
    image_convert(colorspace = 'gray') %>%
    image_trim() %>%
    image_ocr()
}

text <- ocr_page("bwbb5July1992_Page_1_Image_0001.png")
write.table(text, "output.txt")

text <- ocr_page("bwbb5July1992_Page_2_Image_0001.png")
write.table(text, "bwbb5July1992.2.txt")
|
ca955ed99b2ebf7ad61d9a08d13abfd47fd6d7ea | 953f9e769da44b32dbb628d2cf49a52cc300647c | /server.R | ebb86e127d98366472507596034daddd8cdea18a | [] | no_license | CarolinaBrinholi/projetoshinny | f333ef5eeda36536110658fa3b6b3e9ac4d6b91b | e5f62ee1d6db467ad470181644f26b95343379fa | refs/heads/master | 2020-04-28T05:57:43.786916 | 2019-03-11T17:34:53 | 2019-03-11T17:34:53 | 175,038,413 | 1 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 4,856 | r | server.R |
# install.packages("googleAnalyticsR")
getwd()
# setwd("C:/Users/CarolinaFagundes/Desktop/trainning_shinny")
#### FIRST TRY with ga data ####
# building the dataframe with the ga data
####################################
library(googleAnalyticsR)
library(ggplot2)
library(stringr)
library(dplyr)
# Easy interactive authorization against Google Analytics (opens a browser).
ga_auth()
#
# ## VERY GOOD: this lists every account registered in Analytics
# ## (at IESB there are 60):
my_accounts <- google_analytics_account_list()
# #View(my_accounts)
#
# #Use my_accounts to find the viewId
my_id <- "167966494"
#
#
# #set date variables for DYNAMIC date range
# ########### CHANGE THAT ## (hard-coded range; should come from the UI)
start_date <- "2018-10-01"
end_date <- "2019-02-25"
#
# #Session Query - Uses start_date and end_date
# #### the final data frame for FIRST METRICS
# NOTE(review): google_analytics_4() is the legacy name for google_analytics()
# in googleAnalyticsR -- confirm the installed package version still exports it.
df1 <- google_analytics_4(my_id,
                          date_range = c(start_date, end_date),
                          metrics = c("users", "sessions", "bounceRate", "uniquepageviews"),
                          dimensions = c("date","ga:deviceCategory"))
### the data frame for pageviews
# Same metrics, finer dimensions (hour + page path); max = -1 fetches all rows.
df2 <- google_analytics(my_id,
                        date_range = c(start_date, end_date),
                        metrics = c("users","sessions", "bounceRate", "uniquepageviews"),
                        dimensions = c("date",
                                       "hour","ga:deviceCategory", "ga:pagePath"),
                        max = -1)
df2$weekday <- weekdays(as.Date(df2$date))
# Split the URL path on "/" into at most 5 pieces; each row becomes a
# character vector stored in a list-column.
df2$pagePath_2 <- str_split(df2$pagePath, pattern = fixed("/"), n=5)
x <- do.call(rbind, df2$pagePath_2)
colnames(x) <- LETTERS[1:ncol(x)]
df2<- cbind(df2, x)
# String cleanup: strip anchors, query strings, digits and punctuation from
# the first meaningful path segment (column B), then lowercase it.
df2$B <- gsub("\\#.*","",df2$B)
df2$B <- gsub("\\_.*","",df2$B)
df2$B <- gsub("\\?.*","",df2$B)
df2$B <- gsub("[\\^0-9.]","",df2$B)
df2$B <- gsub("\\-.*","",df2$B)
df2$B <- gsub("\\..*","",df2$B)
df2$B <- gsub("\\ .*","",df2$B)
df2$B <- tolower(df2$B)
# Collapse related pages into canonical categories. Column A is a scratch
# logical flag; the hard-coded column index 12 is presumably column "B" --
# NOTE(review): confirm; this breaks silently if df2's column layout changes.
df2$A <- grepl("vestibularead", df2$B, fixed = TRUE)
df2[which(df2$A == TRUE), 12] <- "ead"
df2$A <- grepl("vestibular", df2$B)
df2[which(df2$A == TRUE), 12] <- "vestibular"
df2$A <- grepl("cricao", df2$B)
df2[which(df2$A == TRUE), 12] <- "inscricao"
df2$A <- grepl("graduacao", df2$B)
df2[which(df2$A == TRUE), 12] <- "graduacao"
# NOTE(review): "resultado" matches are also labelled "graduacao" -- confirm
# this is intentional and not a copy-paste slip.
df2$A <- grepl("resultado", df2$B)
df2[which(df2$A == TRUE), 12] <- "graduacao"
unique(df2$B)
############################
library(dplyr)
library(ggplot2)
library(tidyr)
server <- function(input, output) {
  # Echo the selected date range back to the UI.
  output$value <- renderPrint({ input$date })

  # Return the formula text for printing as a caption ----
  output$caption <- renderText({
    paste(input$n, "por device")
  })

  # NOTE: the original defined an unused top-level reactive (df_graph) that
  # was immediately shadowed inside output$graph, and every renderPlot built a
  # one-shot reactive() it invoked right away. Neither added anything, so the
  # plot data is now computed directly inside each renderPlot.

  # Chosen metric (input$n) over time, one line per device category.
  output$graph <- renderPlot({
    req(input$date)
    selected <- df1 %>%
      filter(date >= input$date[1] & date <= input$date[2])
    plot_data <- select(selected, date, deviceCategory, n = input$n)
    ggplot(data = plot_data, aes(x = date, y = n,
                                 color = deviceCategory)) +
      geom_line() + scale_x_date(date_minor_breaks = "1 day", date_labels = "%d/%m") +
      labs(x ="data", y = "variável de escolha")
  })

  # Chosen metric by hour of day.
  output$graph2 <- renderPlot({
    req(input$date)
    selected <- df2 %>%
      filter(date >= input$date[1] & date <= input$date[2])
    plot_data <- select(selected, hour, deviceCategory, n = input$n)
    ggplot(data = plot_data) +
      geom_line(aes(x=hour, y=n,color = deviceCategory)) +
      labs(x ="hora", y = "variável de escolha")
  })

  # Chosen metric by weekday.
  output$graph3 <- renderPlot({
    req(input$date)
    selected <- df2 %>%
      filter(date >= input$date[1] & date <= input$date[2])
    plot_data <- select(selected, weekday, deviceCategory, n = input$n)
    ggplot(data = plot_data) +
      geom_line(aes(x=weekday, y=n,color = deviceCategory)) +
      labs(x ="dia da semana", y = "variável de escolha")
  })

  # Chosen metric by cleaned page-path category (column B built in the prep code).
  output$graph4 <- renderPlot({
    req(input$date)
    selected <- df2 %>%
      filter(date >= input$date[1] & date <= input$date[2])
    plot_data <- select(selected, B, deviceCategory, n = input$n)
    ggplot(data = plot_data) +
      geom_line(aes(x=B, y=n,color = deviceCategory)) +
      labs(x ="pagePath", y = "variável de escolha")
  })
}
|
0894e9f45a4a9b30b47761f2dbf5ece0459b326d | e9a76b782286e21268bfa068f6d96f769176ec4b | /ui.R | 3bf6f4d947a8574feabbc5bcf226a2b01e7cf72b | [] | no_license | arbuci/DDP_Project | c458aa9ab766d99e783cb052e8efee7072fa743a | 3180e393b6ea0de3abf3690f0a03ebece1f6887e | refs/heads/master | 2021-01-20T05:20:53.659129 | 2017-08-25T19:26:31 | 2017-08-25T19:26:31 | 101,435,552 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,712 | r | ui.R |
library(shiny)
library(shinythemes)
# Single-page UI for the Rx staffing prediction app: the sidebar collects the
# census/workload parameters; the main panel shows instructions plus the
# predicted order count and pharmacist need (text1/text2 are filled by server.R).
shinyUI(fluidPage(
    theme = shinytheme("cosmo"),
    headerPanel("Rx Staffing Prediction"),
    # Model inputs; defaults reflect a typical day.
    sidebarPanel(
        h3("Parameters"),
        h4("All parameters are required"),
        numericInput('acute_census', 'Acute Care Census', 165, min = 0),
        numericInput('icu_census', 'ICU Census', 50, min = 0),
        numericInput('surgeries', 'Scheduled Surgeries', 47, min = 0),
        numericInput('ed_arrivals', 'Yesterday ED Arrivals', 240, min = 0),
        radioButtons('weekend', "Weekend", choices = c("Y", "N"), selected = "N", inline = TRUE),
        numericInput('orders_per', 'Orders per Pharmacist', 350, min = 1)
    ),
    # Narrative text and the computed results.
    mainPanel(
        h3("Introduction"),
        p("One of the key productivity metrics for hospital inpatient pharmacies is the verification of new medication orders by a pharmacist. This has to be completed before the medication can be dispensed. This application predicts a number of orders that will require verification based on several inputs, and determines the number of pharmacists that will be required to fulfill all the verification tasks."),
        h3("Instructions"),
        p("Set values for the current day's censuses, previous day's ED arrivals, weekend flag, and expected surgeries to predict the number of orders requiring verification. Adjust the number of orders per pharmacist parameter to set the expected workload of each pharmacist and calculate how many pharmacists will be needed to address the predicted number of orders."),
        h3("Result"),
        "The model predicts ",
        strong(textOutput('text1', inline = TRUE)),
        " orders and a need for ",
        strong(textOutput('text2', inline = TRUE)),
        " pharmacists."
    )
))
|
893461798c6ccb1326530dfaa86f3729a1221ef5 | 076c560b664792aa2c6f701fb6c5e8acc05469e1 | /SampleSpecificScoringandModelling.R | f4dab695255c0229247fc2e2883e4cfb358499ec | [] | no_license | ypriverol/TGEClassification | 80ed79a727f88e62ae2b12fe8a9c767d7890688b | 1201fb0e4ae871ab8e9e74f2424e6dcc8f6e2876 | refs/heads/master | 2020-04-20T20:05:21.223090 | 2017-10-20T12:59:51 | 2017-10-20T12:59:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,058 | r | SampleSpecificScoringandModelling.R |
# Command-line interface:
#   Rscript SampleSpecificScoringandModelling.R <conseq.train.tsv> <peptides.csv> <conseq.test.tsv> <out.tsv>
args <- commandArgs(trailingOnly = TRUE)
args.length <- length(args)
if(args.length < 4){
  print ("Error: Not sufficient parameters")
  print ("Rscript SampleSpecificScoringandModelling.R coseq.train.tsv peptides.csv conseq.test.tsv conseq.test.sss.tsv")
  # BUG FIX: the script previously fell through this branch and crashed later
  # with an opaque subscript error; abort up front instead.
  stop("Not sufficient parameters", call. = FALSE)
}
conseq.train.file=args[1]
peptide.file=args[2]
conseq.test.file=args[3]
conseq.test.outfile=args[4]
#conseq.file="/data/SBCS-BessantLab/shyama/Data/Bristol/Human/adenovirus/PITDB/AminoAcids-or-ORFs-orTGEs/human_adeno.assemblies.fasta.transdecoder.pep.identified.fasta_conseq.tsv"
#peptide.file="/data/SBCS-BessantLab/shyama/Data/Bristol/Human/adenovirus/PITDB/PSMs-Peptides-ORFs/human_adeno+fdr+th+grouping_filtered.csv"
# Training conservation-score matrix, identified peptides, and the test matrix.
conseq.matrix.train=read.table(conseq.train.file, header=TRUE, sep='\t')
peptides=read.table(peptide.file, header=TRUE, sep=',')
# Restrict the training conseq rows to peptides that were actually identified.
peptide.train.conseq=conseq.matrix.train[which(conseq.matrix.train[,'Peptide'] %in% peptides[,'Sequence']),]
conseq.matrix.test=read.table(conseq.test.file, header=TRUE, sep='\t')
## Train model.
## For each candidate detectability value c, estimate the identification rate
## inside the window [c - 0.05, c + 0.05] (identified peptides / all peptides),
## then smooth that curve with a cubic polynomial regression and apply it to
## the test matrix.
data.points <- seq(0.01, max(conseq.matrix.train$detectability.predicted.by.Random.Forest), 0.01)
xy <- matrix(0, nrow = length(data.points), ncol = 4)
rownames(xy) <- data.points
colnames(xy) <- c('c2', 'sss', 'psm', 'specEval')
xy[, 'c2'] <- data.points
# Identified / total unique peptides whose predicted detectability falls in
# the +/-0.05 window around `center`.
window_ratio <- function(center) {
  lo <- center - 0.05
  hi <- center + 0.05
  train_det <- conseq.matrix.train$detectability.predicted.by.Random.Forest
  ident_det <- peptide.train.conseq$detectability.predicted.by.Random.Forest
  n.ident <- length(unique(peptide.train.conseq[which(lo <= ident_det & ident_det <= hi), 'Peptide']))
  n.total <- length(unique(conseq.matrix.train[which(lo <= train_det & train_det <= hi), 'Peptide']))
  n.ident / n.total
}
sss <- vapply(data.points, window_ratio, numeric(1))
names(sss) <- rownames(xy)  # apply() over rows returned a named vector; keep that shape
xy[, 'sss'] <- sss
fit_x <- xy[, 'c2']
fit_y <- xy[, 'sss']
model <- lm(fit_y ~ poly(fit_x, 3))
predicted.intervals.test <- predict(model,
                                    data.frame(fit_x = conseq.matrix.test[, 'detectability.predicted.by.Random.Forest']),
                                    interval = 'confidence', level = 0.99)
conseq.matrix.test[, 'sss'] <- predicted.intervals.test[, 1]
write.table(conseq.matrix.test, file = conseq.test.outfile, sep = "\t", row.names = FALSE)
9c32de42e9d95e0ad0f23dd3248142645f91a496 | b1b44c6570174adef2a32c550b88438de069f1cf | /courses/stat587Eng/labs/lab01/lab01.R | 2868dedff72e3738df9d5fbb464cc365cdd99996 | [] | no_license | AnnieLima/jarad.github.com | 49b8378b5d4d7b4cf17e0f39f20b325eb5009c54 | 07c6fea2842ad2085dfcb2d9d41b0a6c6e5b611c | refs/heads/master | 2023-08-18T11:03:51.491294 | 2021-10-13T16:34:28 | 2021-10-13T16:34:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,038 | r | lab01.R | ## --------------------------------------------------------------------------------------------------------------
a = 1
b = 2
a+b
## --------------------------------------------------------------------------------------------------------------
x = 1:10
y = rep(c(1,2), each=5)
m = lm(y~x)
s = summary(m)
## ---- eval=FALSE-----------------------------------------------------------------------------------------------
## x
## y
## m
## s
## s$r.squared
## --------------------------------------------------------------------------------------------------------------
1+2
1-2
1/2
1*2
## --------------------------------------------------------------------------------------------------------------
(1+3)*2 + 100^2 # standard order of operations
sin(2*pi) # the result is in scientific notation, i.e. -2.449294 x 10^-16
sqrt(4)
10^2
log(10) # the default is base e
log(10, base=10)
## --------------------------------------------------------------------------------------------------------------
a = 1
b = 2
a+b
a-b
a/b
a*b
## --------------------------------------------------------------------------------------------------------------
a <- 1
2 -> b
c = 3 # is the same as <-
## --------------------------------------------------------------------------------------------------------------
a
b
c
## --------------------------------------------------------------------------------------------------------------
# Rectangle
# (Renamed from `length`/`width`: `length <- 4` masked base::length(), a
# classic R footgun. All printed values are unchanged.)
rect_length <- 4
rect_width <- 3
area <- rect_length * rect_width
area
# Circle
radius <- 2
area <- pi*radius^2 # this overwrites the previous `area` variable
circumference <- 2*pi*radius
area
circumference
# (Right) Triangle
opposite <- 1
angleDegrees <- 30
angleRadians <- angleDegrees * pi/180
(adjacent <- opposite / tan(angleRadians)) # = sqrt(3)
(hypotenuse <- opposite / sin(angleRadians)) # = 2
## ---- echo=FALSE-----------------------------------------------------------------------------------------------
# Find the probability the individual has the disease if
# specificity is 0.95, sensitivity is 0.99, and prevalence is 0.001
## --------------------------------------------------------------------------------------------------------------
a = 3.14159265
b = "STAT 587 (Eng)"
c = TRUE
## --------------------------------------------------------------------------------------------------------------
a
b
c
## --------------------------------------------------------------------------------------------------------------
a = c(1,2,-5,3.6)
b = c("STAT","587", "(Eng)")
c = c(TRUE, FALSE, TRUE, TRUE)
## --------------------------------------------------------------------------------------------------------------
length(a)
length(b)
length(c)
## --------------------------------------------------------------------------------------------------------------
class(a)
class(b)
class(c)
## --------------------------------------------------------------------------------------------------------------
1:10
5:-2
seq(from = 2, to = 5, by = .05)
## --------------------------------------------------------------------------------------------------------------
rep(1:4, times = 2)
rep(1:4, each = 2)
rep(1:4, each = 2, times = 2)
## --------------------------------------------------------------------------------------------------------------
a = c("one","two","three","four","five")
a[1]
a[2:4]
a[c(3,5)]
a[rep(3,4)]
## --------------------------------------------------------------------------------------------------------------
a[c(TRUE, TRUE, FALSE, FALSE, FALSE)]
## --------------------------------------------------------------------------------------------------------------
a[-1]
a[-(2:3)]
## --------------------------------------------------------------------------------------------------------------
a[2] = "twenty-two"
a
a[3:4] = "three-four" # assigns "three-four" to both the 3rd and 4th elements
a
a[c(3,5)] = c("thirty-three","fifty-five")
a
## --------------------------------------------------------------------------------------------------------------
m1 = cbind(c(1,2), c(3,4)) # Column bind
m2 = rbind(c(1,3), c(2,4)) # Row bind
m1
all.equal(m1, m2)
m3 = matrix(1:4, nrow = 2, ncol = 2)
all.equal(m1, m3)
m4 = matrix(1:4, nrow = 2, ncol = 2, byrow = TRUE)
all.equal(m3, m4)
m3
m4
## --------------------------------------------------------------------------------------------------------------
m = matrix(1:12, nrow=3, ncol=4)
m
m[2,3]
## --------------------------------------------------------------------------------------------------------------
m[1:2,3:4]
## --------------------------------------------------------------------------------------------------------------
m[1:2,]
## --------------------------------------------------------------------------------------------------------------
m[-c(3,4),]
## --------------------------------------------------------------------------------------------------------------
m[1:4]
## --------------------------------------------------------------------------------------------------------------
c(1,"a")
## --------------------------------------------------------------------------------------------------------------
c(TRUE, 1, FALSE)
## --------------------------------------------------------------------------------------------------------------
c(TRUE, 1, "a")
## --------------------------------------------------------------------------------------------------------------
m = rbind(c(1, 12, 8, 6),
c(4, 10, 2, 9),
c(11, 3, 5, 7))
m
## ---- echo=FALSE-----------------------------------------------------------------------------------------------
# Print the element in the 3rd-row and 4th column
# Print the 2nd column
# Print all but the 3rd row
# Reconstruct the matrix if time allows
## --------------------------------------------------------------------------------------------------------------
class(warpbreaks) # warpbreaks is a built-in data.frame
## --------------------------------------------------------------------------------------------------------------
warpbreaks[1:3,1:2]
## --------------------------------------------------------------------------------------------------------------
names(warpbreaks)
warpbreaks[1:3, c("breaks","wool")]
## --------------------------------------------------------------------------------------------------------------
str(warpbreaks)
## ---- eval=FALSE-----------------------------------------------------------------------------------------------
## install.packages("ggplot2")
## ---- eval=FALSE-----------------------------------------------------------------------------------------------
## install.packages("tidyverse")
## ---- eval = FALSE---------------------------------------------------------------------------------------------
## install.packages("devtools")
## ---- eval = FALSE---------------------------------------------------------------------------------------------
## devtools::install_github("jarad/swgoh")
## ---- eval = FALSE---------------------------------------------------------------------------------------------
## library("ggplot2")
## ---- eval = FALSE---------------------------------------------------------------------------------------------
## devtools::install_github("jarad/swgoh")
## ---- eval=FALSE-----------------------------------------------------------------------------------------------
## install.packages("swirl")
## ---- eval=FALSE-----------------------------------------------------------------------------------------------
## library("swirl")
## swirl()
## ---- eval=FALSE-----------------------------------------------------------------------------------------------
## ?mean
## ---- eval=FALSE-----------------------------------------------------------------------------------------------
## help.search("mean")
## ---- eval=FALSE-----------------------------------------------------------------------------------------------
## ?ggplot
## ?geom_point
|
487cfe09394e1853b971186dc994cb57916ba667 | 27a831dc58b73697dca0dc9bb85f5c6cc7e13bfc | /R/EMNearestMeanClassifier.R | e38ab0b26a138f694069b58b664350dabdc340ad | [] | no_license | cran/SSLR | b113137a98d1410311c896980fbea325667b51e9 | 806b4f7c470c4a7615cadae0b15747aef80cc818 | refs/heads/master | 2021-08-17T22:53:29.136291 | 2021-07-22T07:10:07 | 2021-07-22T07:10:07 | 249,203,588 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,536 | r | EMNearestMeanClassifier.R | #' @title General Interface for EMNearestMeanClassifier model
#' @description model from RSSL package
#' Semi-Supervised Nearest Mean Classifier using Expectation Maximization
#'
#' Expectation Maximization applied to the nearest mean classifier assuming Gaussian classes with a spherical covariance matrix.
#'
#' Starting from the supervised solution, uses the Expectation Maximization algorithm (see Dempster et al. (1977)) to iteratively update the means and shared covariance of the classes (Maximization step) and updates the responsibilities for the unlabeled objects (Expectation step).
#' @param method character; Currently only "EM"
#' @param scale Should the features be normalized? (default: FALSE)
#' @param eps Stopping criterion for the minimization
#' @references Dempster, A., Laird, N. & Rubin, D., 1977. Maximum likelihood from incomplete data via the EM algorithm. Journal of the Royal Statistical Society. Series B, 39(1), pp.1-38.
#' @example demo/EMNearestMeanClassifier.R
#' @importFrom RSSL EMNearestMeanClassifier
#' @export
EMNearestMeanClassifierSSLR <- function(method = "EM", scale = FALSE,
                                        eps = 1e-04) {

  # Fitting closure handed to new_model_sslr(); it is invoked later with the
  # training data, where unlabeled rows are carried as NA labels and split
  # off by get_x_y_And_unlabeled().
  train_function <- function(x, y) {
    load_RSSL()

    # The underlying RSSL implementation only supports two classes.
    if (length(levels(y)) > 2) {
      stop("EMNearestMeanClassifierSSLR is for binary problems")
    }

    list_values <- get_x_y_And_unlabeled(x, y)

    fitted <- RSSL::EMNearestMeanClassifier(
      X = list_values$x, y = list_values$y, X_u = list_values$X_u,
      method = method, scale = scale, eps = eps
    )

    result <- list(
      model = fitted,
      classes = levels(y),
      pred.params = c("class", "raw"),
      mode = "classification"
    )
    class(result) <- "EMNearestMeanClassifierSSLR"
    result
  }

  args <- list(method = method, scale = scale, eps = eps)
  new_model_sslr(train_function, "EMNearestMeanClassifierSSLR", args)
}
#' @title Predict EMNearestMeanClassifierSSLR
#' @description Delegates to the predict() method of the wrapped RSSL model.
#' @param object is the object
#' @param x is the dataset
#' @param ... This parameter is included for compatibility reasons.
#' @method predict EMNearestMeanClassifierSSLR
#' @importFrom stats predict
#' @importFrom magrittr %>%
predict.EMNearestMeanClassifierSSLR <- function(object, x, ...) {
  predict(object$model, x)
}
|
1baf24588119468cb129efcc50e3d01eb64ff04e | d355cd356683aba70b3f38b57fefcc8a145831e4 | /2.R | 47c32d7d8d5f7bb3b5b1ac7d4d70a2b727eebc0e | [] | no_license | marylt/r | b383ac189152324125ee8b785578e067ab50141c | c107b18cfac1bab62030168523aaf50d456898e4 | refs/heads/master | 2021-05-25T22:39:01.646455 | 2020-07-27T15:48:27 | 2020-07-27T15:48:27 | 253,951,002 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,086 | r | 2.R |
# 4a) 16 columns, 26217 rows
# 4b) NA = 153 , 5 = 133
# Distribution of injury-severity codes. NOTE(review): nassCDS and gss are
# assumed to be attached/loaded earlier in the session (e.g. from DAAG) --
# confirm before running standalone.
table(nassCDS$injSeverity)
# 4b) 3 = 8495 , 4 = 1118 , % = 36.667%
8495+1118
9613/26217 * 100
# 4c) Death counts and proportions by seatbelt use.
table(nassCDS$dead, nassCDS$seatbelt)
prop.table(table(nassCDS$dead, nassCDS$seatbelt))
# 4d) Centre and spread of vehicle model year.
summary(nassCDS$yearVeh)
sd(nassCDS$yearVeh, na.rm = TRUE)
# 4e)
hist(nassCDS$yearVeh,
     main="Histogram for Car Year",
     xlab="Car Year",
     border="blue",
     col="green")
# 5a) Inspect the hours-worked factor.
table(gss$HRS1)
summary(gss$HRS1)
levels(gss$HRS1)
typeof(gss$HRS1)
class(gss$HRS1)
# 5b) Convert HRS1 to numeric: recode the open-ended "89+ hrs" level to 89,
# then go factor -> character -> numeric (as.numeric on a factor directly
# would return the level codes, not the hours).
hours1 <- car::recode(gss$HRS1,"'89+ hrs'=89")
hours1 <-factor(hours1)
levels(hours1)
hours1 <- as.numeric(as.character(hours1))
table(gss$HRS1)
table(hours1)
# 5c) Bin hours into an ordered 3-level work-intensity factor.
summary(hours1)
class(gss$HRS1)
nlevels(gss$HRS1)
levels(gss$HRS1)
work.lev <- c("Part-time","Full time","More than full-time")
gss$work3 <-factor(
  ifelse(hours1 <= 39, 1,
         ifelse(hours1 > 39 & hours1 <= 40, 2,
                ifelse(hours1 > 40, 3,
                       NA))), labels=work.lev, ordered=T)
table(gss$work3)
|
8575868cbe6473f7353abfdb90df1cd2be6f44c7 | c6dced112ced3307f4a1a04b7b50f6464222adc6 | /aim_1/09_prep_disease_data.R | f2b861e33d4e5b51187dbb23b41a3ce7dd3fb5e9 | [] | no_license | mar13792/uw-phi-vax | d4f93e7b6a0062016e6f82fdd1995f5e5c8a3fee | 3606068316468808a92cc78cc99aee725f819799 | refs/heads/main | 2023-09-03T08:41:55.043257 | 2021-11-16T19:26:45 | 2021-11-16T19:26:45 | 428,534,120 | 0 | 0 | null | 2021-11-16T05:53:21 | 2021-11-16T05:53:21 | null | UTF-8 | R | false | false | 3,082 | r | 09_prep_disease_data.R | # Author: Francisco Rios
# Purpose: read and prep vaccine-preventable disease trend data
# Date: Last modified October 28, 2021
####### read in vaccine-preventable disease trend data
# Read in list of files to prep
# NOTE(review): g_drive, raw_data_dir, read_excel, prep_dx_trend_data and
# outputFile08 are assumed to be defined by an upstream set-up script that
# sources this file -- confirm against the pipeline driver.
file_list <- data.table(read_excel(paste0(g_drive, "data/list_of_data_used.xlsx")))
# subset files to latest disease trends data
file_list <- file_list[data_type=="disease_trends" & year=="2019"]
print("Now prepping:")
# Prep each raw file and accumulate the results. Collecting the pieces in a
# pre-allocated list and binding once at the end replaces the original
# grow-by-rbind inside the loop (quadratic copying); seq_len() also avoids
# the 1:nrow() trap when file_list is empty.
prepped_list <- vector("list", nrow(file_list))
for(i in seq_len(nrow(file_list))){
  # Set up file path
  file_dir = paste0(raw_data_dir, file_list$data_type[i], '/', file_list$data_source[i], '/' )
  # set up arguments
  args <- list(file_dir, file_list$file_name[i], file_list$data_type[i])
  ### RUN THE PREP FUNCTION HERE ###
  tmpData = do.call(prep_dx_trend_data, args)
  # Add indexing data
  append_cols = file_list[i, .(file_name, data_type, data_source)]
  stopifnot(nrow(append_cols)==1)
  tmpData = cbind(tmpData, append_cols)
  prepped_list[[i]] <- tmpData
  print(paste0(i, " ", file_list$data_type[i], " ", file_list$disease[i], " ", file_list$file_name[i])) ## if the code breaks, you know which file it broke on
}
# Bind data together (same semantics as the original incremental
# rbind(..., use.names=TRUE, fill=TRUE)).
prepped_dx_data <- data.table::rbindlist(prepped_list, use.names = TRUE, fill = TRUE)
# formatting of data ----
# load recently extracted data
dx_dt <- prepped_dx_data
# subset columns of interest
dx_dt <- dx_dt %>%
select(measure_name, location_id, location_name,
cause_id, cause_name, metric_name,
year, val, upper, lower, file_name)
# rename columns for consistency
setnames(dx_dt, old = c("year"), new = c("year_id"))
# recode YDL values
dx_dt <- dx_dt %>%
mutate(measure_name = recode(measure_name, Deaths='deaths', `YLDs (Years Lived with Disability)`='ylds'),
metric_name = recode(metric_name, Number='number', Percent='percent', Rate='rate'))
# split into three datasets (number, percent, rate)
dx_numb <- dx_dt %>% filter(metric_name=="number")
dx_rate <- dx_dt %>% filter(metric_name=="percent")
dx_perc <- dx_dt %>% filter(metric_name=="rate")
# pivot each data set wider
dx_numb <- dx_numb %>%
pivot_wider(
names_from = c(measure_name, metric_name),
names_glue = "{measure_name}_{metric_name}_{.value}",
values_from = c(val, upper, lower)
)
dx_rate <- dx_rate %>%
pivot_wider(
names_from = c(measure_name, metric_name),
names_glue = "{measure_name}_{metric_name}_{.value}",
values_from = c(val, upper, lower)
)
dx_perc <- dx_perc %>%
pivot_wider(
names_from = c(measure_name, metric_name),
names_glue = "{measure_name}_{metric_name}_{.value}",
values_from = c(val, upper, lower)
)
# bind datasets back together
mergeCols <- c("location_id", "location_name", "cause_id", "cause_name", "year_id", "file_name")
tidy_data <- dx_numb %>%
full_join(dx_rate, by = mergeCols) %>%
full_join(dx_perc, by = mergeCols)
# save prepped data
saveRDS(tidy_data, outputFile08)
# print final statement
print("Step 08: Reading in disease trend data completed.")
|
045e51a5ce064cc3bafdbc4f9e2450b91a0959fc | 619df0d3bb72bac2098e941062a4b4a588dd7ce1 | /CODES/merge_health_camp_details.R | 57e1083d57e7fcc9bb4d261e23c545b8490df674 | [] | no_license | piyushJaiswal/Knocktober | b73b038a15e04c4bb54863b87f92e25d43f76008 | 22e3b225b12e967a81c21e4ba12a1e014260361f | refs/heads/master | 2021-01-17T18:35:53.349607 | 2016-10-23T18:53:19 | 2016-10-23T18:53:19 | 71,532,497 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,070 | r | merge_health_camp_details.R | library(data.table)
# Merge camp-level attributes onto the train and test registration tables:
# read the CSVs, parse the date columns, left-join the health-camp details by
# Health_Camp_ID, and write the enriched tables to the DERIVED folder.
# Training set (already has the outcome attached).
train <- data.table(read.csv("../DERIVED/train_with_Outcome.csv"))
any(is.na(train))   # quick NA scan (auto-printed at top level)
summary(train)
sapply(train,class)
# parse the registration date string into a Date column
train[, Registration_Date := as.Date(Registration_Date)]
#.............................
# Test set.
test <- data.table(read.csv("../RAW/Test_D7W1juQ.csv"))
any(is.na(test))
#.............................
# Camp-level attributes (dates and category columns).
health_camp <- data.table(read.csv("../RAW/Health_Camp_Detail.csv"))
summary(health_camp)
sapply(health_camp, class)
# parse both camp date columns in one assignment by reference
health_camp[, c("Camp_Start_Date", "Camp_End_Date") := .(as.Date(Camp_Start_Date),
                                                         as.Date(Camp_End_Date))]
health_camp$Category3 <- as.factor(health_camp$Category3)
#.............................
# Left joins: keep every train/test row, attach camp details where available.
train <- merge(train, health_camp, by = "Health_Camp_ID", all.x = TRUE)
any(is.na(train))
test <- merge(test, health_camp, by = "Health_Camp_ID", all.x = TRUE)
any(is.na(test))
#.............................
write.csv(train, file = "../DERIVED/train_outcome_camp_details.csv", row.names = FALSE)
write.csv(test, file = "../DERIVED/test_camp_details.csv", row.names = FALSE)
|
e1384e6bbadfb72203af7bf154abc4838ee1e74e | aaf995963a37ba9f7028ccc40a04bc8b4fe7ad48 | /man/make.landscape.Rd | 54d01f8bc092bb2ef1aa208278ef5588db787778 | [] | no_license | sokole/MCSim | b6791d6b8b8c40e5151f2493ce13f9962ac87468 | 37b12af90323efcb6b70d41709d7fec983edb8f9 | refs/heads/master | 2022-05-09T10:07:51.856441 | 2022-03-31T18:04:37 | 2022-03-31T18:04:37 | 17,027,948 | 6 | 6 | null | 2022-03-31T16:37:14 | 2014-02-20T16:55:17 | R | UTF-8 | R | false | true | 2,562 | rd | make.landscape.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make.landscape.R
\name{make.landscape}
\alias{make.landscape}
\alias{fn.make.landscape}
\title{Make a simulation landscape}
\usage{
make.landscape(JM = 10000, m = 0.1)
make.landscape(site.coords = c(1:10), m = 0.1, JM = 10000)
}
\arguments{
\item{site.info}{A data frame with site information}
\item{site.coords}{A data.frame of site coordinates. Can be 1, 2, or more dimensions}
\item{dist.mat}{Alternative to site.coords. Can be a distance matrix or a network map from the igraph package}
\item{JL}{Scalar or vector number of individuals at each site, overrides JM}
\item{JM}{Total number of individuals to include in a MCSim simulation.}
\item{m}{Immigration rate parameter, from Hubbell's neutral model. Overrides I.rate.m2.}
\item{I.rate.m2}{Alternative to m, immigration rate in number of individuals / m2 / timestep. Default is 1.}
\item{area.m2}{Area of each site in m2. Default is 1.}
\item{Ef.specificity}{Vector of specificity values for environmental filters at each site. If 0 (default), site habitat value is modeled as a single point along an environmental gradient. If > 0, a site's habitat is modeled as a normal curve around a point on an environmental gradient.}
\item{Ef}{Vector of habitat scores for each site.}
\item{guess.site.coords}{Binary. If TRUE, Uses PCoA to extract site coordinates if given a distance matrix or network map. Useful to make a map to display sites. Not necessary if igraph input is used because igraph has a function to plot a network map. Default is FALSE.}
\item{list.of.stuff}{A list that can be used to store other landscape attributes in the landscape object. Useful for storing igraph properties when igraph is used.}
}
\description{
Define the attributes of a MCSim landscape, including number of sites, area, carrying capacity, and local immigration rates.
}
\details{
There are two steps to creating a metacommunity simulation in MCSim:
1. Make a "landscape" -- The landscape is the “game board” on which the simulation plays out, and it is created using the make.landscape function.
2. Run the simulation -- Once the landscape is created, you can pass the landscape object to metaSIM along with parameter settings that define the rules for how metacommunity dynamics will play out in the metacommunity simulation. Note that the current version of MCSim is zero sum, which means there will always be JM individuals in the simulation during each generation.
For a tutorial, see \url{http://rpubs.com/sokole/159425}
}
|
37b499357021cf2aeb7d0623ec3ead736ee1bac4 | d579f5507787282628c6427cd2377af3eb4021db | /R/FPS_imputation.R | e383cdf5bfe3c970190ef2b24d44a171936b98a4 | [
"MIT"
] | permissive | mscsep/SAM_FGT | d3c8d2790018705e81606dab62711f5af3566f6d | 865ecec9161c1f72774ae7f3ebd402e77a90020d | refs/heads/main | 2023-04-28T14:24:00.405788 | 2021-05-12T07:38:07 | 2021-05-12T07:38:07 | 366,632,177 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,333 | r | FPS_imputation.R | # Imputation Fear potentiated startle (EMG eyeblink) data from the Fear Generalization Task (FGT) in the SAM study
# Written by Rosalie Gorter & Milou Sep.
# NOTE: The function "Impute_FGT_EMG_SAM" is used twice in this script:
# 1) to impute individual trials (set sorttype to 'trials').
# - Note: trials will be imputed if more than 1/3 of the trials in a category is present (in other words if <2/3 missing),
# if less than 1/3 is present (in other words if >2/3 is missing; `missing code 4`) all the trials (for that category) will be set to missing
# 2) to create imputed means (set sorttype to 'mean')
# - Note: means will be based on imputed trials if more than 2/3 of trials is present (in other words if <1/3 missing; `missing code 1`),
# or imputed directly if less than 2/3 of the trials is present (or in other words if >1/3 missing; `missing code 2` ).
# NOTE(review): Impute_FGT_EMG_SAM is defined at the bottom of this file but
# called below; sourcing this file top-to-bottom will fail unless the function
# definition is evaluated first -- confirm the intended run order.
#install.packages("mice")
library("mice")   # multiple imputation by chained equations
# install.packages("readr")
library(readr)    # read_delim()
# Read Data ---------------------------------------------------------------
# (Note: Only valid digital Biopac cues analyzed)
# Semicolon-separated file with comma as decimal mark; the Matlab missing
# codes listed below are read in as NA.
FGT_batch <- read_delim("data/SAM_FGT.csv", ";",locale=locale(decimal_mark = ","), escape_double = FALSE, trim_ws = TRUE, na = c("NaN","5555","8888","9999"))
# Note Missing codes (assigned via Matlab preprocessing code):
# 5555 % Digital registration error, message on screen & MissingValue 5555.
# 8888 % Excessive baseline activity. (More than 2 SD deviation)
# 9999 % Latency onset not valid (Note, this only affects onset scores, so not present in magnitude variables)
# Prepare Data ------------------------------------------------------------
# remove the "SMGH" prefix from subject names so the numeric part can be used
# on the x-axis (& in subsets)
FGT_batch$subjName <- gsub("SMGH","", FGT_batch$subjName)
# Make Condition a factor (used as predictor in the imputation model)
FGT_batch$Condition <- as.factor(FGT_batch$Condition)
# Subset all Unstandardized startle magnitude variables (Umag) plus Condition
Umag <- subset.data.frame(FGT_batch, select = c(grep("Umag", names(FGT_batch)),Condition))
# Note! Imputation now only done on Umag data (which is better than standardized data for LMM analyses). The dataset also contains Standardized startle magnitude variables (Smag), Peak latencies () and Onset Latencies are also available in FGT_batch.
# This script could also be used to impute Standardized FPS responses.
# Smag <- subset.data.frame(FGT_batch, select = c(grep("Smag", names(FGT_batch)),Condition))
# Imputation Settings -----------------------------------------------------
# M = number of imputed datasets; MAXIT = mice iterations per dataset.
# NOTE(review): the output file names below say "M50_MAXIT100" while the
# settings here are M = 100 and MAXIT = 50 -- the numbers in the file names
# appear to be swapped; confirm which is intended.
M<-100
MAXIT<-50
# Call imputation script --------------------------------------------------
# Impute separate trials (used for Habituation-trials, Cue-trials, Context-trials)
out_Umag_trials<-Impute_FGT_EMG_SAM(data=Umag, M=M, MAXIT=MAXIT, sorttype='trials') # call function below
save(out_Umag_trials, file = 'processed_data/OUPUT_IMPUTATIE_FGT_out_M50_MAXIT100_Umag_Trials.rda') # save Imputed datasets
# Impute means (used for inter-trial intervals)
out_Umag_all<-Impute_FGT_EMG_SAM(data=Umag, M=M, MAXIT=MAXIT, sorttype='mean') # call function below
save(out_Umag_all, file = 'processed_data/OUPUT_IMPUTATIE_FGT_out_M50_MAXIT100_Umag_AllMeans.rda') # save Imputed datasets
# Export original data ------------------------------------------------------
# Add subject name back to the dataset (it was removed for the imputations,
# but is required for further data analyses)
Umag$subject <- as.numeric(FGT_batch$subjName)
# Save input data to imputation file
save(Umag, file = 'processed_data/INPUT_IMPUTATIE_FGT_Umag.rda') # Raw dataset (needed to select `complete cases` for midsobject)
# Imputation Function Definition ------------------------------------------
# Multiple imputation (via mice) of FGT startle (EMG) magnitudes.
#
# Arguments:
#   data     - data frame of startle magnitudes; the LAST column must be the
#              factor `Condition` (used only as predictor, never imputed).
#   M        - number of imputed datasets to generate.
#   MAXIT    - number of mice iterations per dataset.
#   sorttype - "trials": return imputed individual trials; categories where a
#              participant misses more than 2/3 of the trials are set back to
#              NA afterwards (missing code 4).
#              "mean": return per-category means; computed from the imputed
#              trials when at most 1/3 of the trials was missing (codes 0/1),
#              imputed directly as a mean otherwise (code 2).
#   ...      - unused; kept for backward compatibility of the interface.
#
# Side effects: writes intermediate objects via saveRDS()/save() to
# "processed_data/" and the working directory.
#
# Returns a named list:
#   sorttype "mean"  : list(MergedMeans_imputed = list of M data frames,
#                           Means_Completecases = matrix of complete-case means)
#   sorttype "trials": list(Trials_imputed = list of M data frames,
#                           Trials_Original = data)
Impute_FGT_EMG_SAM <- function(data, M, MAXIT, sorttype, ...) {
  # validate early: any other sorttype previously failed much later with an
  # obscure "object not found" error
  stopifnot(sorttype %in% c("mean", "trials"))

  # DEFINE VARIABLE PATTERNS & NAMES ----------------------------------------
  # Regex patterns collecting the trial columns of the 17 FGT categories.
  FGT_Variabels <- c('sA_Cue_.*_T', 'sA_Cue_.*_S', 'sA_Ctx_.*_T', 'sA_Ctx_.*_S', 'sA_ITI_', # Variables 1 t/m 5
                     'sA_HAB_',                                                             # Variable 6
                     'sA_HABctx_.*_T', 'sA_HABctx_.*_S', 'sA_HABctx_.*_N',                  # Variables 7,8,9
                     'sG_Cue_.*_T', 'sG_Cue_.*_S', 'sG_Cue_.*_N',                           # Variables 10,11,12
                     # Variables with only 3 trials per category:
                     'sG_Ctx_.*_T', 'sG_Ctx_.*_S', 'sG_Ctx_.*_N', 'sG_ITI_',                # Variables 13,14,15,16
                     'sG_HAB_')                                                             # Variable 17 [same method as variable 6]
  # Category names: drop the "_.*_" middle part of each pattern.
  # fixed=TRUE so "_.*_" is matched literally, not interpreted as a regex.
  FGT_Variabels_Names <- gsub(x = FGT_Variabels, pattern = "_.*_", replacement = '_', fixed = TRUE)

  # SELECT & RESTRUCTURE FOR IMPUTATION -------------------------------------
  # Split a wide data frame into a named list with one element (the data
  # frame of trial columns) per FGT category.
  restructure_FGTdata <- function(data_restruct, sorttype) {
    selected_FGT_data_allTrials <- list()
    for (i in seq_along(FGT_Variabels)) {
      selected_FGT_data_allTrials[[i]] <- data_restruct[, grep(FGT_Variabels[i], names(data_restruct))]
    }
    names(selected_FGT_data_allTrials) <- FGT_Variabels_Names
    return(selected_FGT_data_allTrials)
  }
  ldata <- restructure_FGTdata(data, sorttype)

  # COUNT & RECODE MISSINGS -------------------------------------------------
  # Per category and participant: count missing trials and assign a code.
  #   0 = no trials missing
  #   sorttype "mean"  : 1 = <= 1/3 missing (mean from imputed trials),
  #                      2 = >  1/3 missing (impute the mean directly)
  #   sorttype "trials": 1 = <= 2/3 missing (imputed trials are kept),
  #                      4 = >  2/3 missing (all trials set to NA afterwards)
  Recode_Missings_FGT <- function(sorttype) {
    # the two sorttypes differ only in the threshold fraction and the
    # "too much missing" code (previously two duplicated branches)
    if (sorttype == 'mean') {
      frac <- 1 / 3
      too.missing.code <- 2
    } else {
      frac <- 2 / 3
      too.missing.code <- 4
    }
    n.missings <- code.missings <- list()
    for (i in seq_along(ldata)) {
      # number of missing trials PER participant WITHIN category i
      n.missings[[i]] <- rowSums(is.na(ldata[[i]]))
      code.missings[[i]] <- ifelse(n.missings[[i]] > (frac * ncol(ldata[[i]])), too.missing.code, 1)
      code.missings[[i]] <- ifelse(n.missings[[i]] == 0, 0, code.missings[[i]])
    }
    out <- list(code.missings, n.missings)
    names(out) <- c("code", "n")
    return(out)
  }
  # Missings$code / Missings$n: one vector (length = participants) per category
  Missings <- Recode_Missings_FGT(sorttype)

  # Save trials & missings in list and as file
  data_missings_trials <- list(ldata, Missings$n, Missings$code)
  names(data_missings_trials) <- c("ldata", "n.missings", "code.missings")
  saveRDS(data_missings_trials, "processed_data/data_missing_trials.rda")

  if (sorttype == 'mean') {
    # CALCULATE MEAN STARTLES -----------------------------------------------
    # per-category means; na.rm = TRUE ignores missing trials
    mean.startle <- matrix(unlist(lapply(ldata, rowMeans, na.rm = TRUE)), nrow = nrow(data))
    # complete-case means: NA whenever any trial in the category is missing
    mean.startle.completecases <- matrix(unlist(lapply(ldata, rowMeans, na.rm = FALSE)), nrow = nrow(data))
    FGT_Variabels_Names_Means <- paste0("mean_", FGT_Variabels_Names)
    colnames(mean.startle) <- FGT_Variabels_Names_Means
    colnames(mean.startle.completecases) <- FGT_Variabels_Names_Means

    # SAVE MEANS --------------------------------------------------------------
    data_missings_means <- list(ldata, Missings$n, Missings$code, mean.startle)
    names(data_missings_means) <- c("ldata", "n.missings", "code.missings", "mean.startle")
    saveRDS(data_missings_means, "processed_data/data_missings_means_all.rda")

    # PREPARE DATA FOR IMPUTATION ---------------------------------------------
    # blank out the means based on too few trials (code 2) so mice imputes them
    for (i in seq_along(ldata)) {
      for (j in seq_len(nrow(data))) {
        if (Missings$code[[i]][j] == 2) { mean.startle[j, i] <- NA }
      }
    }
    # append Condition as the last column (the factor is coerced to its
    # numeric codes by cbind on a matrix)
    mean.startle.c <- cbind(mean.startle, data$Condition)
    colnames(mean.startle.c)[length(colnames(mean.startle.c))] <- "Condition"
  }

  # MAKE PREDICTION MATRIX --------------------------------------------------
  # Every variable predicts every other; Condition is a predictor only and is
  # itself never imputed (its row is all 0, its column all 1).
  pred_allpulses <- (1 - diag(1, ncol(data)))
  rownames(pred_allpulses) <- colnames(data)
  colnames(pred_allpulses) <- colnames(data)
  pred_allpulses["Condition", ] <- 0
  # (was `1:ncol(x)-1`, which by precedence is 0:(n-1); the 0 index was
  # silently dropped, so the effective rows were 1..n-1 -- made explicit here)
  pred_allpulses[seq_len(ncol(pred_allpulses) - 1), "Condition"] <- 1
  if (sorttype == 'mean') {
    cond.col <- ncol(mean.startle.c)  # Condition is the last column (was hard-coded 18)
    pred_means <- (1 - diag(1, ncol(mean.startle.c)))
    pred_means[cond.col, ] <- 0
    pred_means[seq_len(ncol(pred_means) - 1), cond.col] <- 1
  }

  # IMPUTATION --------------------------------------------------------------
  ## Trials
  Startle_imputatie_pulses <- mice(data = data,
                                   pred = pred_allpulses,
                                   m = M,
                                   maxit = MAXIT,
                                   seed = 356)
  saveRDS(Startle_imputatie_pulses, "startle_pulses_imp")
  ## Means
  if (sorttype == 'mean') {
    Startle_imputatie_means <- mice(mean.startle.c,
                                    pred = pred_means,
                                    m = M,
                                    maxit = MAXIT,
                                    seed = 356)
    saveRDS(Startle_imputatie_means, "startle_means_imp")
  }

  # MERGE DATA --------------------------------------------------------------
  dataout <- vector("list", M)  # one element per imputed dataset
  for (m in seq_len(M)) {
    # completed trial data for imputation m; suffix "_i" marks imputed columns
    Imputed_Pulses_complete <- complete(Startle_imputatie_pulses, m, include = FALSE)
    colnames(Imputed_Pulses_complete) <- paste0(names(Imputed_Pulses_complete), "_i")

    if (sorttype == 'mean') {
      # 2A) directly imputed means for imputation m
      Imputed_Means_complete <- complete(Startle_imputatie_means, m, include = FALSE)
      colnames(Imputed_Means_complete) <- paste0(names(Imputed_Means_complete), "_i")
      # 2B) means recomputed from the imputed trials
      ldataIMP <- restructure_FGTdata(Imputed_Pulses_complete, sorttype)
      Means_based_on_Imputed_Pulses <- list()
      for (i in seq_along(ldataIMP)) {
        this_mean <- rowMeans(cbind(ldataIMP[[i]]), na.rm = TRUE)
        Means_based_on_Imputed_Pulses <- as.data.frame(cbind(Means_based_on_Imputed_Pulses, this_mean))
      }
      colnames(Means_based_on_Imputed_Pulses) <- paste0(FGT_Variabels_Names_Means, "_i")
      # merged means: per participant/category pick the appropriate source
      # based on the missing code
      mergedmeans <- data.frame(matrix(0, nrow(data), length(ldata)))
      colnames(mergedmeans) <- paste0(FGT_Variabels_Names_Means, "_merged")
      for (i in seq_along(ldata)) {
        for (j in seq_len(nrow(data))) {
          if (Missings$code[[i]][j] == 1) { mergedmeans[j, i] <- Means_based_on_Imputed_Pulses[j, i] } # <= 1/3 missing: mean of imputed trials
          if (Missings$code[[i]][j] == 0) { mergedmeans[j, i] <- Imputed_Means_complete[j, i] }        # nothing missing: observed mean
          if (Missings$code[[i]][j] == 2) { mergedmeans[j, i] <- Imputed_Means_complete[j, i] }        # > 1/3 missing: directly imputed mean
        }
      }
      dataout[[m]] <- mergedmeans
    } else if (sorttype == 'trials') {
      # 3A) individual imputed trials, corrected for the "2/3 missing" rule
      ldataIMP <- restructure_FGTdata(Imputed_Pulses_complete, sorttype)
      ldataIMP_corrected <- ldataIMP
      for (i in seq_along(ldataIMP)) {
        for (j in seq_len(nrow(data))) {
          # code 4: more than 2/3 of the trials in this category was missing,
          # so the whole category is set back to NA for this participant
          if (Missings$code[[i]][j] == 4) {
            ldataIMP_corrected[[i]][j, ] <- rep(NA, length(ldataIMP[[i]]))
          }
        }
      }
      # 3B) flatten the list back to one data frame plus Condition
      # (was hard-coded nrow = 117, the SAM sample size; generalized)
      All.Trials.MissingCorrected <- cbind(data.frame(matrix(unlist(ldataIMP_corrected), nrow = nrow(data))),
                                           Imputed_Pulses_complete$Condition_i)
      # NOTE(review): this assumes the category-wise column order produced by
      # unlist(ldataIMP_corrected) matches the original column order of the
      # input data -- confirm against the SAM_FGT column layout.
      colnames(All.Trials.MissingCorrected) <- colnames(Imputed_Pulses_complete)
      dataout[[m]] <- All.Trials.MissingCorrected
    }
  }

  # CREATE OUTPUT -----------------------------------------------------------
  if (sorttype == 'mean') {
    out <- list(dataout, mean.startle.completecases)
    names(out) <- c("MergedMeans_imputed", "Means_Completecases")
  } else {
    out <- list(dataout, data)
    names(out) <- c("Trials_imputed", "Trials_Original")
  }
  # Save output in the processed_data folder
  save(out, file = 'processed_data/OUPUT_IMPUTATIE_FGT_out_endofscript.rda')
  return(out)
}
|
444c11b2c99c942ebbdc9e2174788bb83a0cbddb | f761c920dab7459abe3bbcf03224e2da8964e76e | /man/get_ondridf_column_names_by_CATEGORICAL.Rd | 3801a430abaa5d89163fb9f6b6ec0d9123875649 | [] | no_license | ondri-nibs/ONDRIdf | 92b6f9ad49f57546a1ddc2e57e7b559361eaae83 | 508077263ffb1592cc7bb85f4e6e9d15d5640628 | refs/heads/main | 2023-03-25T05:32:30.212498 | 2021-03-12T04:06:32 | 2021-03-12T04:06:32 | 341,672,213 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 396 | rd | get_ondridf_column_names_by_CATEGORICAL.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{get_ondridf_column_names_by_CATEGORICAL}
\alias{get_ondridf_column_names_by_CATEGORICAL}
\title{Get columns names by CATEGORICAL}
\usage{
get_ondridf_column_names_by_CATEGORICAL(ondridf)
}
\arguments{
\item{ondridf}{the ONDRI_df data frame}
}
\description{
Get all the column names of type CATEGORICAL
}
|
c5e30fb85bcb1145aab0bcad9d2ed74de72a8561 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/smog/man/plot.cv.cglasso.Rd | 867cd2496dfc94221561028aa0f5473791d4801c | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,318 | rd | plot.cv.cglasso.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cv_cglasso.R
\name{plot.cv.cglasso}
\alias{plot.cv.cglasso}
\title{plot method for objects of \code{cv.cglasso} class}
\usage{
\method{plot}{cv.cglasso}(x, ...)
}
\arguments{
\item{x}{A fitted object of class "cv.cglasso".}
\item{...}{Other graphical parameters to ggplot2.}
}
\description{
Yields a cross-validation curve, and error bars within one standard deviation of the curve,
as a function of the group penalty \eqn{\lambda_1}.
}
\examples{
# generate design matrix x
set.seed(2018)
n=50;p=20
s=10
x=matrix(0,n,1+2*p)
x[,1]=sample(c(0,1),n,replace = TRUE)
x[,seq(2,1+2*p,2)]=matrix(rnorm(n*p),n,p)
x[,seq(3,1+2*p,2)]=x[,seq(2,1+2*p,2)]*x[,1]
g=c(p+1,rep(1:p,rep(2,p))) # groups
v=c(0,rep(1,2*p)) # penalization status
label=c("t",rep(c("prog","pred"),p)) # type of predictor variables
# generate beta
beta=c(rnorm(13,0,2),rep(0,ncol(x)-13))
beta[c(2,4,7,9)]=0
# generate y
data=x\%*\%beta
noise=rnorm(n)
snr=as.numeric(sqrt(var(data)/(s*var(noise))))
y=data+snr*noise
cvfit=cv.cglasso(x,y,g,v,label,family="gaussian", nlambda.max = 20)
plot(cvfit)
}
\references{
\insertRef{ma2019structural}{smog}
}
\seealso{
\link{cv.cglasso}, \link{cv.smog}, \link{smog}.
}
\author{
Chong Ma, \email{chongma8903@gmail.com}.
}
|
befb5946639f9ffa33fd80b5e48aefef68a84798 | e14f7957195af2cadbf9b5102b9dfc7b62251cf5 | /peakAnalyzer/ChIPseqPipeline/peakOverlap/plotVenn2C.r | 33e2e9759d73b78797405db4b60bba3890436d82 | [] | no_license | zzz2010/basespace-ui | f8fcfe830de93f5ce31bf3d774a8c881a0048e80 | 663bb7607354465960fc8d1335275b5c7002a04d | refs/heads/master | 2021-01-10T20:26:26.757006 | 2014-03-18T02:50:12 | 2014-03-18T02:50:12 | 32,113,315 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,629 | r | plotVenn2C.r | venn.overlap <- function(r, a, b, target = 0)
{
  # Overlap ("lens") area of two circles of radius `a` and `b` whose centers
  # are `r` apart, minus `target`.
  #
  # The `target` offset makes this directly usable with uniroot(): solving
  # venn.overlap(r, a, b, target) == 0 for r gives the center distance that
  # produces an overlap area of `target`.
  #
  # Degenerate cases: no overlap when r >= a + b; the smaller circle is fully
  # contained when r <= |a - b|. The containment tests use <= (the original
  # strict < sent the concentric equal-radius case r = 0, a = b into the
  # general formula, where it produced NaN from a 0/0).
  if (r >= a + b) {          # disjoint circles: overlap is zero
    return(-target)
  }
  if (r <= a - b) {          # circle b entirely inside circle a
    return(pi * b * b - target)
  }
  if (r <= b - a) {          # circle a entirely inside circle b
    return(pi * a * a - target)
  }
  # Partial overlap: the lens is the two circular sectors spanned by the
  # chord, minus twice the triangle formed by the two centers and one chord
  # endpoint (side lengths a, b, r).
  s <- (a + b + r)/2                                       # semi-perimeter
  triangle.area <- sqrt(s * (s - a) * (s - b) * (s - r))   # Heron's formula
  aa <- 2 * atan(sqrt(((s - r) * (s - a))/(s * (s - b))))  # half-angle at center of a
  ab <- 2 * atan(sqrt(((s - r) * (s - b))/(s * (s - a))))  # half-angle at center of b
  sector.area <- aa * (a * a) + ab * (b * b)
  overlap <- sector.area - 2 * triangle.area
  return(overlap - target)
}
# Draw a two-set Venn diagram with area-proportional circles.
#
# d$table  - 2x2 table: rows = membership in set 1 (FALSE/TRUE), columns =
#            membership in set 2; d$table[2,2] is the overlap count.
# d$labels - character vector of two set labels for the legend.
#
# Circle areas are proportional to the set sizes; the center distance is
# solved with uniroot()/venn.overlap so the lens area matches the overlap
# count. Draws on the current graphics device and returns the layout
# geometry (radii, centers, distance, normalized counts, labels).
plot.venn.diagram <- function(d)
{
  csz <- 0.05  # horizontal nudge for the overlap-count label

  # raw set sizes from the table margins
  n <- length(dim(d$table))  # always 2 for a 2-way table
  c1 <- vector(length = n)
  c1[1] <- sum(d$table[2, ])  # total count in set 1
  c1[2] <- sum(d$table[, 2])  # total count in set 2
  c2 <- matrix(nrow = n, ncol = n, 0)
  c2[1, 2] <- sum(d$table[2, 2])  # overlap count
  c2[2, 1] <- c2[1, 2]

  # normalize so the combined area is 1
  c2 <- c2/sum(c1)
  c1 <- c1/sum(c1)
  n <- length(c1)

  # radii so that circle area is proportional to the (normalized) counts
  r <- sqrt(c1/pi)

  # center distance producing the required overlap area
  r12 <- 0
  if (min(c1) == c2[1, 2])
    # the smaller set is fully contained in the larger one
    r12 <- min(r)
  else if (c2[1, 2] == 0)
    # no overlap counted: centers kept coincident (original behavior)
    r12 <- 0
  else
    r12 <- uniroot(venn.overlap,
                   interval = c(max(r[1] - r[2], r[2] - r[1], 0) + 0.01,
                                r[1] + r[2] - 0.01),
                   a = r[1], b = r[2], target = c2[1, 2])$root

  # circle centers on the x-axis, then recentered on the area-weighted centroid
  x <- vector()
  y <- vector()
  x[1] <- 0
  y[1] <- 0
  x[2] <- r12
  y[2] <- 0
  # unit-circle outline; scaled/translated per circle when drawn
  xc <- cos(seq(from = 0, to = 2 * pi, by = 0.01))
  yc <- sin(seq(from = 0, to = 2 * pi, by = 0.01))
  cmx <- sum(x * c1)
  cmy <- sum(y * c1)
  x <- x - cmx
  y <- y - cmy
  rp <- sqrt(x * x + y * y)  # distance of each center from the origin

  # draw translucent filled circles with solid outlines, plus a legend
  frame()
  par(usr = c(-1, 1, -1, 1), pty = "s")
  polygon(xc * r[1] + x[1], yc * r[1] + y[1], col = "#0000ff70")
  lines(xc * r[1] + x[1], yc * r[1] + y[1], col = "#0000ff", lwd = 2)
  polygon(xc * r[2] + x[2], yc * r[2] + y[2], angle = 180, col = "#80000070")
  lines(xc * r[2] + x[2], yc * r[2] + y[2], col = "#800000", lwd = 2)
  legend("bottom", "center", d$labels, col = c("#0000ff", "#800000"), pch = 20, horiz = TRUE, pt.cex = 2)

  # print the set-only counts just outside each circle and the overlap count
  # between the centers; zero counts are not printed
  xl <- (rp[1] + (0.7 * r[1])) * x[1]/rp[1]
  yl <- (rp[1] + (0.7 * r[1])) * y[1]/rp[1]
  if (d$table[2, 1] > 0)
    text(xl, yl, d$table[2, 1])
  xl <- (rp[2] + (0.7 * r[2])) * x[2]/rp[2]
  yl <- (rp[2] + (0.7 * r[2])) * y[2]/rp[2]
  if (d$table[1, 2] > 0)
    text(xl, yl, d$table[1, 2])
  if (d$table[2, 2] > 0)
    text((x[1] + x[2])/2 + csz, (y[1] + y[2])/2, d$table[2, 2])  # intersection label

  # layout geometry, useful for inspection/debugging
  list(r = r, x = x, y = y, dist = r12, count1 = c1, count2 = c2,
       labels = d$labels)
}
# Command-line driver: read a two-column overlap file and write a
# proportional Venn diagram next to it as "<file>.venn.png".
#
# Input format: line 1 = two set labels; remaining lines = two numeric
# columns, one row per item, where a positive value means "present in set".
Args<-commandArgs()[grep("^--",commandArgs(),invert=T)]
#main
# Args[2] is the input file path; skip the header line of labels
inp = scan(Args[2], skip=1, list(first=0, second=0))
# membership indicators: positive count = member of that set
A = inp$first > 0
B = inp$second > 0
# set labels come from the first line of the same file
label = scan(Args[2], what="char", nlines = 1)
# Create a list which stores a table and labels.
d = list()
d$table <-matrix(0, 2, 2)
# table(A, B) omits rows/columns for TRUE/FALSE levels that never occur, so
# each cell is copied only when its level combination exists
temp<-table(A,B)
if ("FALSE" %in% rownames(temp) && "FALSE" %in% colnames(temp))
d$table[1,1]=temp["FALSE","FALSE"]
if ("FALSE" %in% rownames(temp) && "TRUE" %in% colnames(temp))
d$table[1,2]=temp["FALSE","TRUE"]
if ("TRUE" %in% rownames(temp) && "FALSE" %in% colnames(temp))
d$table[2,1]=temp["TRUE","FALSE"]
if ("TRUE" %in% rownames(temp) && "TRUE" %in% colnames(temp))
d$table[2,2]=temp["TRUE","TRUE"]
# auto-printed for logging when run with Rscript
d$table
d$labels = label
# Pass list d into the function plot.venn.diagram
png(paste(Args[2],".venn.png",sep = "")) ## set the device to print to png
plot.venn.diagram(d)
dev.off()
|
64cc41c1bf7579adf039e434c4fd983081e0afbb | 12676471ec4e7015048e854817b3c253828df917 | /lab_07/lab_07_before.R | e40b6c0a7dbaf71bfd8c589442410f955289534a | [] | no_license | bdemeshev/coursera_metrics | 9768a61e31e7d1b60edce9dde8e52f47bbd31060 | a689b1a2eed26816b2c5e4fd795136d5f9d1bb4f | refs/heads/master | 2021-11-01T14:15:56.877876 | 2021-10-24T13:16:59 | 2021-10-24T13:16:59 | 23,589,734 | 19 | 58 | null | 2021-07-25T10:51:56 | 2014-09-02T18:07:06 | HTML | UTF-8 | R | false | false | 1,213 | r | lab_07_before.R | # Esli russkie bukvi prevratilitis v krakozyabry, to File - Reopen with
# encoding... - UTF-8 - Set as default - OK
# lab 07
# Attach the required packages.
library(mfx) # marginal-effects estimation
library(vcd) # plots for categorical data
library(reshape2) # data manipulation
library(skimr) # descriptive statistics (instead of psych used in the video lectures)
library(AUC) # for the ROC curve
library(rio) # imports files of many formats
library(tidyverse) # plotting and data manipulation; attaches dplyr, ggplot2, etc.
# When loading files R automatically turns every string variable into a
# factor; this incantation asks R not to do that :)
options(stringsAsFactors = FALSE)
# Read the Titanic passenger data.
# NOTE(review): the name 't' masks base R's transpose function t().
t <- import("titanic3.csv")
# Source and description:
# http://lib.stat.cmu.edu/S/Harrell/data/descriptions/titanic.html
|
d9fd44ea8b5374e72dfcd138839c552c6906e4bb | a15b588f0417c4c3660abeec016ee0a4e076144b | /man/calculate.Rd | 5f2a94c1e0b27f5e6bbac62df9f6f888634c0da7 | [] | no_license | Hornet47/RProjektEWS | e062dabe9be7f369b6fa143d6b4f95611eb592f1 | ec6fc056bc6b3c76a84656f7624851f6e49f8c7c | refs/heads/main | 2023-03-04T12:29:22.298921 | 2023-02-15T07:34:31 | 2023-02-15T07:34:31 | 337,612,434 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 617 | rd | calculate.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate.R
\name{calculate}
\alias{calculate}
\title{Berechne die numerische Zusammenfassung}
\usage{
calculate(data = getFromAPI())
}
\arguments{
\item{data}{die Daten, auf den die Berechnung basiert.}
}
\value{
ein tibble von der numerische Zusammenfassung.
}
\description{
\code{calculate} berechnet die numerische Zusammenfassung von \code{data}.
}
\details{
Dies ist eine Funktion, die den Median von marketing_start_date in den
Untergruppen von finished und dosage_form berechnet.
}
\examples{
calculate()
calculate(getFromAPI())
}
|
ca28e7dbcd264901932488e1f39f93cbfe9d74b0 | 8391972afa0eb226c18f9dd0c3f594798107da2f | /plot2.R | 51ee84de63e5734981ad82d2d9dbca93349df90f | [] | no_license | maelfosso/ExData_Plotting1 | b20e03ab2cc9a5c78aec7c508f5967722d452169 | 2657f18191d454032210326992c46d7e364b68b0 | refs/heads/master | 2021-01-15T10:36:52.530651 | 2014-10-14T14:01:18 | 2014-10-14T14:01:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 650 | r | plot2.R | # Read data
# Plot 2: line chart of global active power for 2007-02-01 and 2007-02-02,
# copied from the screen device to plot2.png (480 x 480 px).

# Load the raw data set; fields are ';'-separated and '?' encodes NA.
power <- read.table("household_power_consumption.txt",
                    header = TRUE, sep = ";", na.strings = "?")

# Parse the date column, then combine date + time into a full timestamp.
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
power$Time <- strptime(paste(power$Date, power$Time), format = "%Y-%m-%d %H:%M:%S")

# Restrict to the two target days.
target_rows <- which(power$Date == "2007-02-01" | power$Date == "2007-02-02")
two_days <- power[target_rows, ]

# Draw the line chart on the active graphics device.
plot(two_days$Time, two_days$Global_active_power, type = "l",
     ylab = "Global Active Power(kilowatts)", xlab = "")

# Copy the current device to a PNG file and close the PNG device.
dev.copy(png, file = "plot2.png", width = 480, height = 480, units = "px")
dev.off()
|
5e9479443718c0cf9657b27b7f8ad2aa8d4fbca9 | fe612f81a3118bf3ebef644bae3281bd1c156442 | /man/h2o.cross_validation_holdout_predictions.Rd | 5b9777aa9bd0765449311a5e2529317a62338673 | [] | no_license | cran/h2o | da1ba0dff5708b7490b4e97552614815f8d0d95e | c54f9b40693ae75577357075bb88f6f1f45c59be | refs/heads/master | 2023-08-18T18:28:26.236789 | 2023-08-09T05:00:02 | 2023-08-09T06:32:17 | 20,941,952 | 3 | 3 | null | null | null | null | UTF-8 | R | false | true | 1,108 | rd | h2o.cross_validation_holdout_predictions.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/models.R
\name{h2o.cross_validation_holdout_predictions}
\alias{h2o.cross_validation_holdout_predictions}
\title{Retrieve the cross-validation holdout predictions}
\usage{
h2o.cross_validation_holdout_predictions(object)
}
\arguments{
\item{object}{An \linkS4class{H2OModel} object.}
}
\value{
Returns a H2OFrame
}
\description{
Retrieve the cross-validation holdout predictions
}
\examples{
\dontrun{
library(h2o)
h2o.init()
f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv"
cars <- h2o.importFile(f)
cars["economy_20mpg"] <- as.factor(cars["economy_20mpg"])
predictors <- c("displacement","power","weight","acceleration","year")
response <- "economy_20mpg"
cars_split <- h2o.splitFrame(data = cars,ratios = 0.8, seed = 1234)
train <- cars_split[[1]]
valid <- cars_split[[2]]
cars_gbm <- h2o.gbm(x = predictors, y = response, training_frame = train,
nfolds = 5, keep_cross_validation_predictions = TRUE, seed = 1234)
h2o.cross_validation_holdout_predictions(cars_gbm)
}
}
|
2ed597a702eb996dd0b13627f308c29d2b949f70 | b80f3c28e96b28ef04f2f31381e01c789443a882 | /afl_read.R | ce9f224581176bf340d1ba8c3adb3690b121e253 | [] | no_license | BFTimClarke/AFL_modelV2 | 7ccd52348f93039637c9ac1cff229c9fd17a4e85 | f0e1de38e500598bb8f91e867a24f24cec7ab256 | refs/heads/master | 2020-05-28T08:21:57.453558 | 2019-05-28T02:09:05 | 2019-05-28T02:09:05 | 188,936,102 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 246 | r | afl_read.R | #IMPORT CSV FILES TO R
# Attach the Bradley-Terry modelling package used by the downstream scripts.
library(BradleyTerryScalable)

# Pull each AFL CSV extract into its own data frame (names are relied upon
# by the rest of the project, so they are kept unchanged).
afl_odds           <- read.csv("afl_odds.csv")
afl_footywire_data <- read.csv("afl_footywire_data.csv")
afl_results_data   <- read.csv("afl_results_data.csv")
afl_tables_data=read.csv("afl_tables_data.csv") |
fdae123064a20d903f0d5af0d9b0487ad298c996 | 3f2ba0b5c2a72d90246dff3005ad9c3049123550 | /R/mftools.R | 4144719af29c08667018e1ab595564399ac356cc | [] | no_license | asgr/mftools | 7ee52f62c74959eb59fc35b61ae92257ab98a3f3 | 0db4226539ee6b299497cd5ff53e4814476205e9 | refs/heads/master | 2021-01-21T18:11:02.794420 | 2017-05-22T06:52:21 | 2017-05-22T06:52:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 527 | r | mftools.R | #' Fitting mass and luminosity functions
#'
#' \pkg{mftools} optimally fits galaxy mass functions (MFs) to a set of astronomical data points with/without measurement uncertainties. No binning of the data is used in the fitting. The package can also be used to fit related functions such as halo MFs and luminosity functions.
#'
#' The functions you're most likely to need from \pkg{mftools} are \code{\link{mffit}} and
#' \code{\link{mfplot}}. See the documentation of these functions for details.
"_PACKAGE"
#> [1] "_PACKAGE"
|
500645d48d1d23b1be863b47fddf38a012eb37d7 | f33558b6920844e4fe2ebf561cd36cb2bd95ecde | /InterfaceWeb/src/1nn.R | 7b51d9e3afb221707bf601a4d378a991dfc74103 | [] | no_license | rouillal/classificationImage | 006bd336caba26f2cec7ddfc582235e7dcf58d5a | fe88bae6ac36c78e1475ff31a8e7d72e713a341d | refs/heads/master | 2021-03-13T04:08:58.228159 | 2017-03-09T15:26:21 | 2017-03-09T15:26:21 | 84,456,033 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 978 | r | 1nn.R | #!/usr/bin/R
# ${1} feature vectors file on which the centroids are computed (ideally subset of ${5})
# ${2} codebook size
# ${3} directoy where we want to save the results
# ${4} number max of iteration for clustering
# ${5} feature vectors file of the whole corpus on which the mapping is applied
# R --slave --no-save --no-restore --no-environ --args '/home/data/collection/clef/2010/PhotoAnnotation/GENERATED/CODEBOOKS/random_RGSIFT_Vectors.txt' 4000 '/home/data/collection/clef/2010/PhotoAnnotation/GENERATED/CODEBOOKS/RGSIFT4000CB/' 50 < clustering_script.R
cmd_args=commandArgs();
centers=read.table(cmd_args[7]);
# k=kmeans(tmptable,as.integer(cmd_args[8]),as.integer(cmd_args[10]));
library(class)
corpustable=read.table(cmd_args[9], sep=" ", colClasses="numeric", comment.char="");
knnres=knn1(centers, corpustable,factor(c(1:as.integer(cmd_args[8]))))
write.table(knnres,paste(as.name(cmd_args[10]), sep=""),row.names = FALSE, col.names=FALSE, quote=FALSE);
|
2fbd3386a4ea8ad4b2520c3215bfdaa2cf2bb7aa | ebee9629abd81143610a6352288ceb2296d111ac | /man/pcf_anisotropic.Rd | 0e794cd22ccacfbcb4fcd7644e010f233b791f9c | [] | no_license | antiphon/Kdirectional | 76de70805b4537a5aff0636486eb387cb64069b0 | 98ab63c3491f1497d6fae8b7b096ddd58afc4b29 | refs/heads/master | 2023-02-26T02:19:41.235132 | 2023-02-12T13:07:11 | 2023-02-12T13:07:11 | 37,183,574 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,386 | rd | pcf_anisotropic.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pcf_anisotropic.R
\name{pcf_anisotropic}
\alias{pcf_anisotropic}
\title{Anisotropic pair correlation function}
\usage{
pcf_anisotropic(
x,
r,
u,
h,
stoyan = 0.15,
lambda,
lambda_h,
renormalise = TRUE,
border = 1,
divisor = "d",
...
)
}
\arguments{
\item{x}{pp, list with $x~coordinates $bbox~bounding box}
\item{r}{radius vector at which to estimate}
\item{u}{direction to be converted to angles. Use \code{\link{angle_2_unit}} to transform from angles.}
\item{h}{half-widths of Epanechnikov kernels, vector of two values, one for ranges and one for angles.}
\item{stoyan}{If h not given, use h=c( stoyan/lambda^(1/dim), stoyan*pi) (cf. \code{pcf.ppp}-function in \code{spatstat}-package).}
\item{lambda}{optional vector of intensity estimates at points}
\item{lambda_h}{if lambda missing and lambda_h is given, use this bandwidth in a kernel estimate of lambda(x). Otherwise lambda is set to constant.}
\item{renormalise}{Scale lambda to align with Campbell formula.}
\item{border}{Use translation correction? Default=1, yes. Only for cuboidal windows.}
}
\description{
Estimate the anisotropic pair correlation function (2d and 3d), as defined in f 1991, f. 5.2-5.3.
}
\details{
The antipode symmetry makes it necessary to compute only on one half of the circle/sphere.
}
|
538a490c69630abba69d7f1ebd9cc5a32a5014ce | 6dfd9fdc04641ec8e8fa931d88c32ac4d543d587 | /Clustering/Clustering_25_01.R | 81a08a4b94732a21b8856250893c3e80f2ef4f8a | [] | no_license | remyJardillier/seminaire_ECP | ff961b2a40c2fa3f14a42e1d903ff363ae5d019b | d93c978e3a5af37f216da61aaf910174340530e1 | refs/heads/master | 2021-01-13T14:55:52.497751 | 2017-03-22T21:30:00 | 2017-03-22T21:30:00 | 79,353,365 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,849 | r | Clustering_25_01.R | setwd("/Users/lucieraguet/Documents/R/Seminaire_ECP/Fiches_Sinistres")
# Library imports for the text-mining / clustering pipeline.
library(tm) # Framework for text mining.
library(RColorBrewer) # Generate palette of colours for plots.
library(qdapDictionaries)
library(qdap) # Quantitative discourse analysis of transcripts.
library(dplyr) # Data wrangling, pipe operator %>%().
library(ggplot2) # Plot word frequencies.
library(scales) # Include commas in numbers.
library(magrittr)
library(Rgraphviz)
library(wordcloud)
library(lsa)
library(parallel)
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() is safer for hard dependencies.
require(rJava) # needed for stemming function
require(SnowballC)
# NOTE(review): dplyr is already attached above; this second call is redundant.
library(dplyr)
library(proxy)
library(stats) # hierarchical clustering and kmeans
library(cluster) # to plot kmeans
# Check whether this is really needed; not available for this version of R.
# NOTE(review): 'multicore' was removed from CRAN (superseded by 'parallel'),
# so the library() call likely fails, and calling install.packages() in the
# middle of a script is interactive/fragile — consider deleting both lines.
library(multicore)
install.packages("multicore")
# import pdf file names
#file_name =c( "/Users/lucieraguet/Documents/R/Seminaire_ECP/Prov/BancoInvex/15.01.12 - (01) BANCO INVEX SA FIDEICOMIS NO 192 FINAL - 01563814 (ENG) .pdf",
# "/Users/lucieraguet/Documents/R/Seminaire_ECP/Prov/CaboReal/15.06.03 - CABO REAL (DREAMS & CASA DEL MAR & GANZO IR6 [ENG] .pdf",
# "/Users/lucieraguet/Documents/R/Seminaire_ECP/Prov/Costco/15.05.14 - COSTCO - 01562914 - FINAL [ENG] .pdf",
# "/Users/lucieraguet/Documents/R/Seminaire_ECP/Prov/DesarrolloMarinaVallarta/15.03.13 - DESARROLLO MARINA VALLARTA 01561914 Mayan Los Cabos [ENG] .pdf",
# "/Users/lucieraguet/Documents/R/Seminaire_ECP/Prov/FemsaComerco/15.02.09 - FEMSA COMERCO - 01623214 - ENG.pdf",
# "/Users/lucieraguet/Documents/R/Seminaire_ECP/Prov/FondoNacional/14.10.24 - FONDO NACIONAL - IP 01671914 - 4353-IR01.pdf",
# "/Users/lucieraguet/Documents/R/Seminaire_ECP/Prov/PuebloBonito/16.06.14 - PUEBLO BONITO - 01564314 - UPDATE REPORT.pdf",
# "/Users/lucieraguet/Documents/R/Seminaire_ECP/Prov/VillaSolaris/15.03.23 - VILLA SOLARIS - FINAL REPORT - 01567514 (ENG).PDF")
#Rpdf = readPDF(control = list(text = "-layout"))
#######################################
# create the corpus #
#######################################
#toSpace = content_transformer(function(x, pattern) { return (gsub(pattern, "", x))})
## Création Corpus à partir de fichiers PDF
# import pdf file names
#file_name =c( "/Users/lucieraguet/Documents/R/Seminaire_ECP/Prov/BancoInvex/15.01.12 - (01) BANCO INVEX SA FIDEICOMIS NO 192 FINAL - 01563814 (ENG) .pdf",
# "/Users/lucieraguet/Documents/R/Seminaire_ECP/Prov/CaboReal/15.06.03 - CABO REAL (DREAMS & CASA DEL MAR & GANZO IR6 [ENG] .pdf",
# "/Users/lucieraguet/Documents/R/Seminaire_ECP/Prov/Costco/15.05.14 - COSTCO - 01562914 - FINAL [ENG] .pdf",
# "/Users/lucieraguet/Documents/R/Seminaire_ECP/Prov/DesarrolloMarinaVallarta/15.03.13 - DESARROLLO MARINA VALLARTA 01561914 Mayan Los Cabos [ENG] .pdf",
# "/Users/lucieraguet/Documents/R/Seminaire_ECP/Prov/FemsaComerco/15.02.09 - FEMSA COMERCO - 01623214 - ENG.pdf",
# "/Users/lucieraguet/Documents/R/Seminaire_ECP/Prov/FondoNacional/14.10.24 - FONDO NACIONAL - IP 01671914 - 4353-IR01.pdf",
# "/Users/lucieraguet/Documents/R/Seminaire_ECP/Prov/PuebloBonito/16.06.14 - PUEBLO BONITO - 01564314 - UPDATE REPORT.pdf",
# "/Users/lucieraguet/Documents/R/Seminaire_ECP/Prov/VillaSolaris/15.03.23 - VILLA SOLARIS - FINAL REPORT - 01567514 (ENG).PDF")
#Rpdf = readPDF(control = list(text = "-layout"))
#doc = Corpus(URISource(file_name), readerControl = list(reader = Rpdf))
## Création Corpus à partir de fichiers .txt
#file_name_bis = c("/Users/lucieraguet/Documents/R/Seminaire_ECP/fiches_lucie/BancoInvex/BancoInvex.txt",
# "/Users/lucieraguet/Documents/R/Seminaire_ECP/fiches_lucie/CaboReal/CaboReal.txt",
# "/Users/lucieraguet/Documents/R/Seminaire_ECP/fiches_lucie/Costco/Costco.txt",
# "/Users/lucieraguet/Documents/R/Seminaire_ECP/fiches_lucie/Decameron/Decameron.txt",
# "/Users/lucieraguet/Documents/R/Seminaire_ECP/fiches_lucie/DesarrolloMarinaVallarta/DesarrolloMarinaVallarta.txt",
# "/Users/lucieraguet/Documents/R/Seminaire_ECP/fiches_lucie/FemsaComerco/FemsaComerco.txt",
# "/Users/lucieraguet/Documents/R/Seminaire_ECP/fiches_lucie/FondoNacional/FondoNacional.txt",
# "/Users/lucieraguet/Documents/R/Seminaire_ECP/fiches_lucie/PuebloBonito/PuebloBonito.txt",
# "/Users/lucieraguet/Documents/R/Seminaire_ECP/fiches_lucie/VillaSolaris/VillaSolaris.txt")
#
#Rdoc = readDOC(control = list(text = "-layout"))
#docbis = Corpus(URISource(file_name_bis), readerControl=Rdoc)
# Load the corpus (a tm Corpus object named 'docs' saved in the .Rdata file).
load("corpus_lucie.Rdata")
# Example document.
docs[[72]]$content
# Corpus pre-processing: stop words, numbers, punctuation, whitespace, stemming.
docs = tm_map(docs, removeWords, c("the", "and", stopwords("english")))
docs = tm_map(docs, removeNumbers)
docs = tm_map(docs, removePunctuation)
docs = tm_map(docs, stripWhitespace)
docs = tm_map(docs, stemDocument, language = "english")
# Turn the corpus into a DocumentTermMatrix / TermDocumentMatrix.
dtm = DocumentTermMatrix(docs) # terms on columns, documents on rows, frequencies in the cells
inspect(dtm[1,1:7])
tdm = TermDocumentMatrix(docs) # transpose of dtm
inspect(tdm[1,1:7])
### For Lucie
mat_dtm = as.matrix(dtm)
View(mat_dtm) # lets you browse the data
# NOTE(review): col.names() is not a function (it is an argument name of
# read.table); this line errors — colnames(mat_dtm) was probably intended.
View(col.names(mat_dtm))
mat_tdm=as.matrix(tdm)
View(mat_tdm) # lets you browse the data
####
# Drop sparse terms: keep terms present in at least (1 - k) of the documents.
k=0.8
dtms = removeSparseTerms(dtm, k)
inspect(dtms[1:7,1:7])
### For Lucie
mat_dtm=as.matrix(dtms)
dim(mat_dtm)
View(mat_dtm)
### Question: should we run a PCA or not? We start without one. daisy() is
# used because it is more flexible than dist().
dist_dtm = daisy(mat_dtm,metric = "euclidean")
### For Lucie
View(as.matrix(dist_dtm))
### Hierarchical clustering
# NOTE(review): a pdf device is opened and closed with nothing drawn into it
# (hclust() does not plot); the plot() below therefore goes to the screen and
# the PDF file stays empty — move dev.off() after the plot call.
pdf("Clustering_hierarchiques_1.pdf",height=10,width = 10)
clust_h= hclust(dist_dtm, method="ward.D")
dev.off()
plot(clust_h, xlab=NULL, sub=NULL)
### Cut the tree to understand what causes the cuts
group_h=cutree(clust_h,h=750)
table(group_h)
## Looking at the text contained in group#4 we understand that the corpus creation failed in the sense
## that they are now empty. Therefore we delete them from the corpus and reiterate the hierarchical
## clustering
group1 = which(group_h==1)
group2 = which(group_h==2)
group3 = which(group_h==3)
group4 = which(group_h==4)
# Dump the document names of group 3 for inspection, then drop them.
write.table(names(group3),"test.txt",sep=";")
docs = docs[-as.numeric(group3)]
## Reiterate the operations (rebuild the matrices on the reduced corpus).
dtm = DocumentTermMatrix(docs)
dtms = removeSparseTerms(dtm, k)
mat_dtm=as.matrix(dtms)
dim(mat_dtm)
dist_dtm = daisy(mat_dtm,metric = "euclidean")
# Ward.D2: minimum within-cluster variance criterion.
clust_h= hclust(dist_dtm, method="ward.D2")
plot(clust_h,labels=FALSE, xlab=NULL, sub=NULL)
# Scree-style plot of merge heights to choose the number of clusters.
inertie = sort(clust_h$height, decreasing=TRUE)
plot(inertie, type="s",xlab="Nombre de Classes")
# Two large inertia jumps appear at 4/5 and 7/8 clusters. These hypotheses
# will be re-tested with k-means.
group_h=cutree(clust_h,h=300)
table(group_h)
# Six classes are obtained, but the group of 4 documents on the left branch
# drives the splits, so those elements are removed and the steps repeated.
docs = docs[-as.numeric(which(group_h==1))]
docs = docs[-as.numeric(which(group_h==3))]
docs = docs[-as.numeric(which(group_h==6))]
dtm = DocumentTermMatrix(docs)
dtms = removeSparseTerms(dtm, k)
mat_dtm=as.matrix(dtms)
dim(mat_dtm)
dist_dtm = daisy(mat_dtm,metric = "euclidean")
clust_h= hclust(dist_dtm, method="ward.D2")
# Here the pdf device is (correctly) kept open while plotting the dendrogram.
pdf("Clustering_hierarchiques.pdf",height=10,width = 10)
plot(clust_h,labels=FALSE, xlab=NULL, sub=NULL, main="Clustering hiérarchique des fiches sinistres")
dev.off()
|
575fc39bc3496cac7b28f00745664a585530087f | f3478cb86d8167f4f098a8b0ca253e53625e7db8 | /Age_distribution.R | 56868900079255259fda02fb610532abc195a8b3 | [
"BSD-3-Clause"
] | permissive | jutzca/Corona-Virus-Meta-Analysis-2020 | 65de503df6ded85bdde297094d205bb873a555f1 | e8a44f354b0aa81bf42c12dfc95e4cd50fa21b29 | refs/heads/master | 2023-02-25T22:21:11.282292 | 2021-01-27T20:26:23 | 2021-01-27T20:26:23 | 250,991,019 | 2 | 0 | null | 2020-11-03T16:39:49 | 2020-03-29T08:59:15 | R | UTF-8 | R | false | false | 18,834 | r | Age_distribution.R | #--------------------Forrest Plot for Age Groups------------------------------------------------------------------------------------------------------
#Clear working space
# NOTE(review): rm(list=ls()) wipes the caller's whole workspace as a side
# effect of sourcing this script; running it in a fresh session is safer.
rm(list=ls())
#Install libraries if required
# NOTE(review): if(!require(pkg)) install.packages(pkg) attaches the package
# when already installed, and installs (without attaching) otherwise; the
# library() calls below then attach everything unconditionally.
if(!require(ggplot2)) install.packages("ggplot2")
if(!require(plyr)) install.packages("plyr")
if(!require(dplyr)) install.packages("dplyr")
if(!require(forcats)) install.packages("forcats")
if(!require(metamedian)) install.packages("metamedian")
if(!require(cowplot)) install.packages("cowplot")
#Load libraries
library(ggplot2)
library(plyr)
library(dplyr)
library(forcats)
library(metamedian)
library("cowplot")
#-------------------------Age distribution of adult COVID-19 patients------------------------------------------------------------------------------------------------------
#Load data (one row per study; absolute path, so this only runs on the author's machine)
age_data <- read.csv("/Users/jutzelec/Documents/GitHub/Corona-Virus-Meta-Analysis-2020/Corona_review_demographics_all_patients.csv", sep =',', header = TRUE)
#Subset data: Adult COVID-19 patients, case series and cohort studies, and available information on median age
age_distr_adults <- subset(age_data,(!(is.na(Age_median_nd)) & (!(Study_type== 'Case Study')) & Study_Population =='Adult'))
#Show names of columns
names(age_distr_adults)
#Calculate pooled median, Q1, and Q3 of all studies using sample size as weight
#(pool.med from the metamedian package; coverage.prob = 1 — presumably chosen
#to obtain the point estimate only; TODO confirm against the metamedian docs)
median_age_adults_overall<-pool.med(age_distr_adults$Age_median_nd, age_distr_adults$number_of_patients, norm.approx = TRUE, coverage.prob = 1)
median_age_adults_overall
Q1_age_adults_overall<-pool.med(age_distr_adults$Age_Q1_nd, age_distr_adults$number_of_patients, norm.approx = TRUE, coverage.prob = 1)
Q1_age_adults_overall
Q3_age_adults_overall<-pool.med(age_distr_adults$Age_Q3_nd, age_distr_adults$number_of_patients, norm.approx = TRUE, coverage.prob = 1)
Q3_age_adults_overall
#Plot age distribution: forest-style plot, one row per study (median with
#Q1-Q3 error bars, point size = study size); red dot-dash line = pooled median.
age_adult_plot<- age_distr_adults %>%
  mutate(study_sorted = fct_reorder(Study_nr2, desc(Age_median_nd))) %>% # order studies in descending order of median age
  ggplot(aes(x = study_sorted, y = as.numeric(Age_median_nd), ymin = Age_Q1_nd, ymax = Age_Q3_nd)) +
  geom_point(position = position_dodge(width = 0.2), aes(size = number_of_patients)) +
  geom_errorbar(position = position_dodge(width = 0.2), width = 0.1) +
  geom_hline(yintercept=median_age_adults_overall$pooled.est, size= 1, color="red", linetype='dotdash')+
  ggtitle("Adults")+
  scale_y_continuous(limits=c(0, 100), breaks=c(0, 25, 50, 75, 100))+
  coord_flip() +
  theme_bw()+
  ylab("Median Age [years]")+
  xlab("")+
  theme(axis.title.x = element_text(size=10),
        legend.position = "none",
        panel.grid.major.y = element_blank(),
        panel.grid.minor = element_blank(),
        panel.grid.major.x = element_line(linetype = "dotted", size = 0.3, color = "#3A3F4A"),
        panel.background = element_rect(fill = "#EFF2F4"),
        plot.background = element_rect(fill = "#EFF2F4"),
        plot.title = element_text(size = 10, hjust = 0.5, face = 'bold'))+ labs(size='Study Size')+
  guides(size = guide_legend(override.aes = list(size=c(1,2,3,4))))
age_adult_plot
#-------------------------Age distribution of pregnant COVID-19 patients-----------------------------------------------------------------------------------------------------
#Subset data: Pregnant COVID-19 patients and available information on age
age_distr_pregnant<- subset(age_data,(!(is.na(Age_median_nd)) & (Study_Population =='Pregnant' )))
#Calculate pooled median, Q1, and Q3 of all studies using sample size as weight
#(norm.approx spelled out as TRUE: the single-letter T is an ordinary,
#reassignable binding rather than a reserved word, and TRUE matches every
#other section of this script)
median_age_pregnants_overall<-pool.med(age_distr_pregnant$Age_median_nd,age_distr_pregnant$number_of_patients, norm.approx = TRUE, coverage.prob = 1)
median_age_pregnants_overall
Q1_age_pregnants_overall<-pool.med(age_distr_pregnant$Age_Q1_nd,age_distr_pregnant$number_of_patients, norm.approx = TRUE, coverage.prob = 1)
Q1_age_pregnants_overall
Q3_age_pregnants_overall<-pool.med(age_distr_pregnant$Age_Q3_nd,age_distr_pregnant$number_of_patients, norm.approx = TRUE, coverage.prob = 1)
Q3_age_pregnants_overall
#Plot age distribution: forest-style plot (diamond markers, shape 18); red
#dot-dash line marks the pooled median across studies.
age_pregnant_plot<-age_distr_pregnant %>%
  mutate(study_sorted = fct_reorder(Study_nr2, desc(Age_median_nd))) %>% # order studies in descending order of median age
  ggplot(aes(x = study_sorted, y = as.numeric(Age_median_nd), ymin = Age_Q1_nd, ymax = Age_Q3_nd)) +
  geom_point(position = position_dodge(width = 0.2), aes(size = number_of_patients), shape=18) +
  geom_errorbar(position = position_dodge(width = 0.2), width = 0.1) +
  geom_hline(yintercept=median_age_pregnants_overall$pooled.est, size= 1, color="red", linetype='dotdash')+
  ggtitle("Pregnant Women")+
  scale_y_continuous(limits=c(0, 40), breaks=c(0, 20, 40))+
  coord_flip() +
  theme_bw()+
  ylab("Median Age [years] \n")+
  xlab("")+
  theme(axis.title.x = element_text(size=10),
        legend.position = "none",
        panel.grid.major.y = element_blank(),
        panel.grid.minor = element_blank(),
        panel.grid.major.x = element_line(linetype = "dotted", size = 0.3, color = "#3A3F4A"),
        panel.background = element_rect(fill = "#EFF2F4"),
        plot.background = element_rect(fill = "#EFF2F4"),
        plot.title = element_text(size = 10, hjust = 0.5, face = 'bold'))+ labs(size='Study Size')+
  guides(size = guide_legend(override.aes = list(size=c(1,2,3,4,5))))
age_pregnant_plot
#-------------------------Age distribution of pediatric and neonatal COVID-19 patients------------------------------------------------------------------------------------------------------
#Subset data: Pediatric and neonatal COVID-19 patients (everything that is neither Adult nor Pregnant)
age_distr_pediatric_neonates <- subset(age_data,(!(is.na(Age_median_nd)) & (!(Study_Population =='Adult' |Study_Population =='Pregnant' ))))
names(age_distr_pediatric_neonates)
#Calculate pooled median, Q1, and Q3 of all studies using sample size as weight
median_age_pediatrics_overall<-pool.med(age_distr_pediatric_neonates$Age_median_nd, age_distr_pediatric_neonates$number_of_patients, norm.approx = TRUE, coverage.prob = 1)
median_age_pediatrics_overall
Q1_age_pediatrics_overall<-pool.med(age_distr_pediatric_neonates$Age_Q1_nd, age_distr_pediatric_neonates$number_of_patients, norm.approx = TRUE, coverage.prob = 1)
Q1_age_pediatrics_overall
Q3_age_pediatrics_overall<-pool.med(age_distr_pediatric_neonates$Age_Q3_nd, age_distr_pediatric_neonates$number_of_patients, norm.approx = TRUE, coverage.prob = 1)
Q3_age_pediatrics_overall
#Plot age distribution: forest-style plot (triangle markers, shape 17); note
#this panel has no fixed y-axis range, unlike the adult/pregnant panels.
age_pediatric_plot <- age_distr_pediatric_neonates %>%
  mutate(study_sorted = fct_reorder(Study_nr2, desc(Age_median_nd))) %>% # order studies in descending order of median age
  ggplot(aes(x = study_sorted, y = as.numeric(Age_median_nd), ymin = Age_Q1_nd, ymax = Age_Q3_nd)) +
  geom_point(position = position_dodge(width = 0.2), aes(size = number_of_patients), shape=17) +
  geom_errorbar(position = position_dodge(width = 0.2), width = 0.1) +
  geom_hline(yintercept=median_age_pediatrics_overall$pooled.est, size= 1, color="red", linetype='dotdash')+
  ggtitle("Pediatrics/Neonates")+
  coord_flip() +
  theme_bw()+
  ylab("Median Age [years]")+
  xlab("")+
  theme(axis.title.x = element_text(size=10),
        legend.position = "none",
        panel.grid.major.y = element_blank(),
        panel.grid.minor = element_blank(),
        panel.grid.major.x = element_line(linetype = "dotted", size = 0.3, color = "#3A3F4A"),
        panel.background = element_rect(fill = "#EFF2F4"),
        plot.background = element_rect(fill = "#EFF2F4"),
        plot.title = element_text(size = 10, hjust = 0.5, face = 'bold'))+ labs(size='Study Size')+
  guides(size = guide_legend(override.aes = list(size=c(1,2,3))))
age_pediatric_plot
#---Pool figure---#
#Combine the three panels: pregnant + pediatric stacked on the right (B, C),
#adults on the left (A).
library("cowplot")
pregnant_pediatric<-plot_grid(age_pregnant_plot, age_pediatric_plot,
                    labels = c( "B", "C"),
                    ncol = 1, nrow = 2)
pregnant_pediatric
all_patients <-plot_grid(age_adult_plot, pregnant_pediatric,
                    labels = c( "A"),
                    rel_widths = c(1.5, 1),
                    ncol = 2, nrow = 1)
all_patients
#-------------------------Age distribution of severe patients ------------------------------------------------------------------------------------------------------
#Clear working space
# NOTE(review): this deletes every object created above (including the adult/
# pregnant/pediatric plots), so the sections below must be fully self-contained.
rm(list=ls())
#Load data (severity/mortality cohort; one row per study and disease status)
age_distr_severe_severe_non_severe <- read.csv("/Users/jutzelec/Documents/GitHub/Corona-Virus-Meta-Analysis-2020/Corona_review_demographics_of_severity_mortality_cohort.csv", sep =',', header = TRUE)
##Subset data for patients with severe disease and a usable (positive) median age
age_distr_severe <- subset(age_distr_severe_severe_non_severe, disease_status=='severe' & Median>0)
#Show names of columns
names(age_distr_severe)
#Calculate pooled median, Q1, and Q3 of all studies using sample size as weight
median_age_severe_overall<-pool.med(age_distr_severe$Median, age_distr_severe$number_of_patients, norm.approx = TRUE, coverage.prob = 1)
median_age_severe_overall
class(median_age_severe_overall$pooled.est)
Q1_age_severe_overall<-pool.med(age_distr_severe$Q1, age_distr_severe$number_of_patients, norm.approx = TRUE, coverage.prob = 1)
Q1_age_severe_overall
Q3_age_severe_overall<-pool.med(age_distr_severe$Q3, age_distr_severe$number_of_patients, norm.approx = TRUE, coverage.prob = 1)
Q3_age_severe_overall
#Plot age distribution (x-axis title/labels/ticks are blanked here —
#presumably because this panel sits beside the non-severe panel in the pooled
#figure below; confirm)
age_adult_severe_plot<- age_distr_severe %>%
  mutate(study_sorted = fct_reorder(Study_nr, desc(Median))) %>% # order studies in descending order of median age
  ggplot(aes(x = study_sorted, y = as.numeric(Median), ymin = Q1, ymax = Q3)) +
  geom_point(position = position_dodge(width = 0.2), aes(size = number_of_patients),shape=18) +
  geom_errorbar(position = position_dodge(width = 0.2), width = 0.1) +
  geom_hline(yintercept=median_age_severe_overall$pooled.est, size= 1, color="red", linetype='dotdash')+
  ggtitle("Severe")+
  scale_y_continuous(limits=c(0, 100), breaks=c(0, 25, 50, 75, 100))+
  coord_flip() +
  theme_bw()+
  ylab("Median Age [years]")+
  xlab("")+
  theme(axis.title.x= element_blank(),
        axis.text.x= element_blank(),
        axis.ticks.x= element_blank(),
        legend.position = "none",
        panel.grid.major.y = element_blank(),
        panel.grid.minor = element_blank(),
        panel.grid.major.x = element_line(linetype = "dotted", size = 0.3, color = "#3A3F4A"),
        panel.background = element_rect(fill = "#EFF2F4"),
        plot.background = element_rect(fill = "#EFF2F4"),
        plot.title = element_text(size = 12, hjust = 0.5, face = 'bold'))+ labs(size='Study Size')+
  guides(size = guide_legend(override.aes = list(size=c(1,2,3,4))))
age_adult_severe_plot
#-------------------------Age distribution of non-severe patients ------------------------------------------------------------------------------------------------------
# #Clear working space
# rm(list=ls())
#Load data
# NOTE(review): this is the same CSV as in the severe section, re-read into a
# new (find-and-replace-mangled) name; reusing the object loaded above would do.
age_distr_nonsevere_nonsevere_non_nonsevere <- read.csv("/Users/jutzelec/Documents/GitHub/Corona-Virus-Meta-Analysis-2020/Corona_review_demographics_of_severity_mortality_cohort.csv", sep =',', header = TRUE)
##Subset data for patients with non-severe disease and a usable (positive) median age
age_distr_nonsevere <- subset(age_distr_nonsevere_nonsevere_non_nonsevere, disease_status=='non_severe' & Median>0)
#Show names of columns
names(age_distr_nonsevere)
#Calculate pooled median, Q1, and Q3 of all studies using sample size as weight
median_age_nonsevere_overall<-pool.med(age_distr_nonsevere$Median, age_distr_nonsevere$number_of_patients, norm.approx = TRUE, coverage.prob = 1)
median_age_nonsevere_overall
class(median_age_nonsevere_overall$pooled.est)
Q1_age_nonsevere_overall<-pool.med(age_distr_nonsevere$Q1, age_distr_nonsevere$number_of_patients, norm.approx = TRUE, coverage.prob = 1)
Q1_age_nonsevere_overall
Q3_age_nonsevere_overall<-pool.med(age_distr_nonsevere$Q3, age_distr_nonsevere$number_of_patients, norm.approx = TRUE, coverage.prob = 1)
Q3_age_nonsevere_overall
#Plot age distribution (same layout as the severe panel: diamond markers,
#blanked x-axis annotations, red dot-dash pooled-median line)
age_adult_nonsevere_plot<- age_distr_nonsevere %>%
  mutate(study_sorted = fct_reorder(Study_nr, desc(Median))) %>% # order studies in descending order of median age
  ggplot(aes(x = study_sorted, y = as.numeric(Median), ymin = Q1, ymax = Q3)) +
  geom_point(position = position_dodge(width = 0.2), aes(size = number_of_patients),shape=18) +
  geom_errorbar(position = position_dodge(width = 0.2), width = 0.1) +
  geom_hline(yintercept=median_age_nonsevere_overall$pooled.est, size= 1, color="red", linetype='dotdash')+
  ggtitle("Non-severe")+
  scale_y_continuous(limits=c(0, 100), breaks=c(0, 25, 50, 75, 100))+
  coord_flip() +
  theme_bw()+
  ylab("Median Age [years]")+
  xlab("")+
  theme(axis.title.x= element_blank(),
        axis.text.x= element_blank(),
        axis.ticks.x= element_blank(),
        legend.position = "none",
        panel.grid.major.y = element_blank(),
        panel.grid.minor = element_blank(),
        panel.grid.major.x = element_line(linetype = "dotted", size = 0.3, color = "#3A3F4A"),
        panel.background = element_rect(fill = "#EFF2F4"),
        plot.background = element_rect(fill = "#EFF2F4"),
        plot.title = element_text(size = 12, hjust = 0.5, face = 'bold'))+ labs(size='Study Size')+
  guides(size = guide_legend(override.aes = list(size=c(1,2,3,4))))
age_adult_nonsevere_plot
#---Pool figure---#
#Side-by-side panel: non-severe (A) next to severe (B).
library("cowplot")
pooled_plot <-plot_grid(age_adult_nonsevere_plot, age_adult_severe_plot,
                    labels = c( "A", "B"),
                    ncol = 2, nrow = 1)
pooled_plot
#-------------------------Age distribution of death patients ------------------------------------------------------------------------------------------------------
# #Clear working space
# rm(list=ls())
#Load data
age_distr_death_death_non_death <- read.csv("/Users/jutzelec/Documents/GitHub/Corona-Virus-Meta-Analysis-2020/Corona_review_demographics_of_severity_mortality_cohort.csv", sep =',', header = TRUE)
##Subset data for non-survivors
age_distr_death <- subset(age_distr_death_death_non_death, disease_status=='death')
#Show names of columns
names(age_distr_death)
#Calculate pooled median, Q1, and Q3 of all studies using sample size as weight
# NOTE(review): coverage.prob = 1 requests a 100% confidence interval; confirm.
median_age_death_overall<-pool.med(age_distr_death$Median, age_distr_death$number_of_patients, norm.approx = TRUE, coverage.prob = 1)
median_age_death_overall
class(median_age_death_overall$pooled.est)
Q1_age_death_overall<-pool.med(age_distr_death$Q1, age_distr_death$number_of_patients, norm.approx = TRUE, coverage.prob = 1)
Q1_age_death_overall
Q3_age_death_overall<-pool.med(age_distr_death$Q3, age_distr_death$number_of_patients, norm.approx = TRUE, coverage.prob = 1)
Q3_age_death_overall
#Plot age distribution
# Same forest-style layout as the severity panels, for the non-survivor cohort.
age_adult_death_plot<- age_distr_death %>%
  mutate(study_sorted = fct_reorder(Study_nr, desc(Median))) %>% # order studies in descending order of median age
  ggplot(aes(x = study_sorted, y = as.numeric(Median), ymin = Q1, ymax = Q3)) +
  geom_point(position = position_dodge(width = 0.2), aes(size = number_of_patients),shape=18) +
  geom_errorbar(position = position_dodge(width = 0.2), width = 0.1) +
  geom_hline(yintercept=median_age_death_overall$pooled.est, size= 1, color="red", linetype='dotdash')+
  ggtitle("Non-survivors")+
  scale_y_continuous(limits=c(0, 100), breaks=c(0, 25, 50, 75, 100))+
  coord_flip() +
  theme_bw()+
  ylab("Median Age [years]")+
  xlab("")+
  theme(axis.title.x = element_text(size=10),
        legend.position = "none",
        # legend.title = element_text(size=10, face='bold'),
        # legend.text = element_text(size=9),
        # legend.background = element_rect(fill = "#EFF2F4"),
        # legend.key=element_blank(),
        panel.grid.major.y = element_blank(),
        panel.grid.minor = element_blank(),
        panel.grid.major.x = element_line(linetype = "dotted", size = 0.3, color = "#3A3F4A"),
        panel.background = element_rect(fill = "#EFF2F4"),
        plot.background = element_rect(fill = "#EFF2F4"),
        plot.title = element_text(size = 12, hjust = 0.5, face = 'bold'))+ labs(size='Study Size')+
  guides(size = guide_legend(override.aes = list(size=c(1,2,3,4))))
age_adult_death_plot
#-------------------------Age distribution of survivor patients ------------------------------------------------------------------------------------------------------
# #Clear working space
# rm(list=ls())
#Load data
age_distr_survivor_survivor_non_survivor <- read.csv("/Users/jutzelec/Documents/GitHub/Corona-Virus-Meta-Analysis-2020/Corona_review_demographics_of_severity_mortality_cohort.csv", sep =',', header = TRUE)
##Subset data for survivors
age_distr_survivor <- subset(age_distr_survivor_survivor_non_survivor, disease_status=='survivor')
#Show names of columns
names(age_distr_survivor)
#Calculate pooled median, Q1, and Q3 of all studies using sample size as weight
# NOTE(review): coverage.prob = 1 requests a 100% confidence interval; confirm.
median_age_survivor_overall<-pool.med(age_distr_survivor$Median, age_distr_survivor$number_of_patients, norm.approx = TRUE, coverage.prob = 1)
median_age_survivor_overall
Q1_age_survivor_overall<-pool.med(age_distr_survivor$Q1, age_distr_survivor$number_of_patients, norm.approx = TRUE, coverage.prob = 1)
Q1_age_survivor_overall
Q3_age_survivor_overall<-pool.med(age_distr_survivor$Q3, age_distr_survivor$number_of_patients, norm.approx = TRUE, coverage.prob = 1)
Q3_age_survivor_overall
#Plot age distribution
# Same forest-style layout, for the survivor cohort.
age_adult_survivor_plot<- age_distr_survivor %>%
  mutate(study_sorted = fct_reorder(Study_nr, desc(Median))) %>% # order studies in descending order of median age
  ggplot(aes(x = study_sorted, y = as.numeric(Median), ymin = Q1, ymax = Q3)) +
  geom_point(position = position_dodge(width = 0.2), aes(size = number_of_patients),shape=18) +
  geom_errorbar(position = position_dodge(width = 0.2), width = 0.1) +
  geom_hline(yintercept=median_age_survivor_overall$pooled.est, size= 1, color="red", linetype='dotdash')+
  ggtitle("Survivors")+
  scale_y_continuous(limits=c(0, 100), breaks=c(0, 25, 50, 75, 100))+
  coord_flip() +
  theme_bw()+
  ylab("Median Age [years]")+
  xlab("")+
  theme(axis.title.x = element_text(size=10),
        legend.position = "none",
        panel.grid.major.y = element_blank(),
        panel.grid.minor = element_blank(),
        panel.grid.major.x = element_line(linetype = "dotted", size = 0.3, color = "#3A3F4A"),
        panel.background = element_rect(fill = "#EFF2F4"),
        plot.background = element_rect(fill = "#EFF2F4"),
        plot.title = element_text(size = 12, hjust = 0.5, face = 'bold'))+ labs(size='Study Size')+
  guides(size = guide_legend(override.aes = list(size=c(1,2,3,4))))
age_adult_survivor_plot
#---Pool figure---#
# Final 2x2 grid: non-severe (A), severe (B), survivors (C), non-survivors (D).
library("cowplot")
pooled_plot_severity_mortality <-plot_grid(age_adult_nonsevere_plot, age_adult_severe_plot, age_adult_survivor_plot,age_adult_death_plot,
                                 labels = c( "A", "B", "C", "D"),
                                 ncol = 2, nrow = 2)
pooled_plot_severity_mortality
|
ca14ff718669705fa22da94fee41a124eaa5c6a3 | be7adf829c6ed5b24e5218f390178bab81a2d72b | /CollectMergeWeather/FinalProjectWeatherData.R | 5a99109add2b1c58fcc596317e204750373fac34 | [] | no_license | OutlawSapper/ANLY512_FinalProjectSubmit | a08facffdfc63781935927f53e6cb90bc9b361c3 | df43d5d7681f91d0c3d15713251c7365878ed166 | refs/heads/master | 2022-05-22T00:29:13.997535 | 2020-04-30T17:54:25 | 2020-04-30T17:54:25 | 258,589,679 | 0 | 1 | null | 2020-04-24T23:14:36 | 2020-04-24T18:19:04 | HTML | UTF-8 | R | false | false | 3,199 | r | FinalProjectWeatherData.R | # Load all the necessary packages
library(urltools)
library(httr)
library(tidyverse)
library(tidyr)
library(chron)
# COLLECT WEATHER DATA
# Define the query path to the weather API
# (One-off download from the Visual Crossing history API; kept commented out
# for reference -- presumably its output became the CSV read below. Confirm.)
#query_path <- 'https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/weatherdata/history?aggregateHours=1&startDateTime=2017-01-01T00%3A00%3A00&endDateTime=2019-12-31T00%3A00%3A00&collectStationContributions=false&maxStations=-1&maxDistance=-1&includeNormals=false&shortColumnNames=false&sendAsDatasource=false&allowAsynch=false&contentType=csv&unitGroup=us&key=CCZKWZ2TPJCPLQ5JDCH5QPCA8&locations=Gaithersburg%2C%20MD'
#url <- URLdecode(query_path)
#base_path = paste(url_parse(url)$scheme,'://',url_parse(url)$domain,'/',url_parse(url)$path,sep = "")
#urlbreakdown <- param_get(url)
#request <- GET(url=base_path,
#               query=list(aggregateHours=urlbreakdown$aggregateHours[1],
#                          startDateTime=urlbreakdown$startDateTime[1],
#                          endDateTime=urlbreakdown$endDateTime[1],
#                          shortColumnNames=urlbreakdown$shortColumnNames[1],
#                          contentType=urlbreakdown$contentType[1],
#                          unitGroup=urlbreakdown$unitGroup[1],
#                          key=urlbreakdown$key[1],
#                          locations=urlbreakdown$locations[1]))
#response <- content(request, as = "text", encoding = "UTF-8")
# MERGE WEATHER AND TRAFFIC VIOLATION DATA
weather <- read.csv("hrlyWeather01-2017_12-2019.csv")
traffic <- read.csv("Traffic_Violations2.csv")
# Basic cleaning of the weather data
# Remove two unnecessary columns
dropCols <- c("Location","Resolved.Address")
weather <- weather[,!(names(weather) %in% dropCols)]
# Address to a character value
weather$Address <- as.character(weather$Address)
# Date to a date type value
# NOTE(review): strptime() returns POSIXlt; as.POSIXct() is generally safer
# inside data frames -- confirm before changing, since order()/round()/merge()
# below all operate on this column.
weather$Date.time <- strptime(weather$Date.time, format = "%m/%d/%Y %H:%M")
# All NAs are from zeros in specific weather features, so replace them with 0
weather[is.na(weather)] <- 0
# Traffic Data
# Remove the extra index column
traffic <- traffic[,!(names(traffic) %in% "X")]
# Make sure traffic date column is in same format
# Convert to Characters
traffic$Date.Of.Stop <- as.character(traffic$Date.Of.Stop)
traffic$Time.Of.Stop <- as.character(traffic$Time.Of.Stop)
# Merge into one column and drop the old ones
traffic <- unite(traffic, Date.time, c(Date.Of.Stop,Time.Of.Stop), sep = " ", remove = TRUE)
traffic$Date.time <- strptime(traffic$Date.time, format = "%Y-%m-%d %H:%M:%S")
# Merge traffic and weather data by dates and times
# Order both by Date.time to check work observationally
traffic <- traffic[order(traffic$Date.time),]
weather <- weather[order(weather$Date.time),]
# Round traffic violation date and time to the nearest hour to match weather data
traffic$Date.time <- round(traffic$Date.time, units="hours")
# Join the weather and traffic data
# (inner join on all shared column names -- here effectively Date.time)
trafficWeatherMerged <- merge(traffic,weather, by=intersect(names(traffic),names(weather)),sort=FALSE)
# Drop weather address column
trafficWeatherMerged <- subset(trafficWeatherMerged,select = -Address)
# Write to a csv
write.csv(trafficWeatherMerged,"MergedTrafficWeather.csv")
|
3c57116d793fd868248dd014fb8e17bd11911d12 | 44ecda341c627537356e097624724ab6eb35c437 | /benchmark_3.R | 063d8d87c7cfdd4083128de5171c0f6fbf8deb09 | [] | no_license | dylansun/Ali_recommend_2015 | 9661e3c731908786c90d0a4145c1aebb61c2d07c | aebb15c547607eebbc7c4523bd737120244d7f0d | refs/heads/master | 2020-04-06T07:00:32.990485 | 2015-04-07T06:14:41 | 2015-04-07T06:14:41 | 33,399,394 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,501 | r | benchmark_3.R | ## benchmark
## Benchmark submission: recommend items that users added to cart on the
## last observed day (2014-12-18) but did not purchase that same day.
train.user <- read.csv("data/tianchi_mobile_recommend_train_user.csv")
data.12.18 <- subset(train.user, substr(train.user$time,1,10) == "2014-12-18" )
# behavior_type 3 = add-to-cart, 4 = purchase (per the .add/.buy naming;
# confirm against the dataset documentation).
data.12.18.add <- subset(data.12.18, data.12.18$behavior_type == 3)
data.12.18.buy <- subset(data.12.18, data.12.18$behavior_type == 4)
user.item.add <- data.frame(user_id = data.12.18.add$user_id, item_id = data.12.18.add$item_id, time = data.12.18.add$time)
# Bug fix: the original had `item_id. time` (a `.` instead of `,`), which is
# a syntax error and would stop the whole script from parsing.
user.item.buy <- data.frame(user_id = data.12.18.buy$user_id, item_id = data.12.18.buy$item_id, time = data.12.18.buy$time)
# Drop the rows of `add` whose (user_id, item_id) pair also appears in `buy`,
# i.e. keep cart additions that were not followed by a purchase.
#
# add, buy: data frames with (at least) user_id and item_id columns.
# Returns the matching subset of `add`, all columns preserved.
remove_common <- function(add, buy){
  # Composite keys make the membership test pair-wise; the vectorized %in%
  # replaces the original O(nrow(add) * nrow(buy)) double loop and also
  # handles empty inputs (the old 1:n loop misbehaved when n == 0).
  add.key <- paste(add$user_id, add$item_id, sep = "\r")
  buy.key <- paste(buy$user_id, buy$item_id, sep = "\r")
  add[!(add.key %in% buy.key), ]
}
# Build the submission: candidate (user, item) pairs = cart additions with
# no same-day purchase, de-duplicated and ranked by recency.
left <- remove_common(user.item.add,user.item.buy)
left.unique <- unique(left)
# Bug fix: the original ran `newdata <- newdata[order(time, ...), ]` inside
# an attach()/detach() pair, but `newdata` did not exist yet. Order the
# de-duplicated candidates by time, most recent first, without attach().
newdata <- left.unique[order(left.unique$time, decreasing = TRUE), ]
# Keep the top 2000 / 3000 most recent pairs as the two submission sizes.
newdata.2000 <- newdata[1:2000,]
newdata.3000 <- newdata[1:3000,]
newdata.2000.submit <- data.frame(user_id = newdata.2000$user_id, item_id = newdata.2000$item_id )
newdata.3000.submit <- data.frame(user_id = newdata.3000$user_id, item_id = newdata.3000$item_id )
write.csv(newdata.2000.submit, file = "data/tianchi_mobile_recommendation_predict.2000.csv",row.names = FALSE, quote = FALSE)
write.csv(newdata.3000.submit, file = "data/tianchi_mobile_recommendation_predict.3000.csv",row.names = FALSE, quote = FALSE)
|
9d7361d4aaa885fcc13f2c99f10ca5760245dab7 | 5c63ee58040fc970fc4eb88d2022d5cc9b53f8e9 | /man/guitar_plot.Rd | c72b62244834ea833bad374531c235551b11447d | [] | no_license | ZW-xjtlu/exomePeak2Test | 566f3ee92fe209b816512fc630063a9479321ccf | 57aef76b7ea004bda8ed5603d8e061c2ecf49fd2 | refs/heads/master | 2020-03-23T22:08:34.786207 | 2020-01-05T12:54:17 | 2020-01-05T12:54:17 | 142,155,967 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 266 | rd | guitar_plot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/guitar_plot.R
\name{guitar_plot}
\alias{guitar_plot}
\title{Produce code for guitar plot}
\usage{
guitar_plot(pkc_rds_file, front_name, ...)
}
\description{
Produce code for guitar plot
}
|
f80de60a21432cce342ac3f3eacb01a4bc55944a | 62ec89b6d425b18ff1ff5be2b9649785282d7865 | /inst/snippets/Example8.3.R | 33f2b1bcc8c01734fa7c5aff4afbf90dea7ebede | [] | no_license | klaassenj/Lock5withR | 8cf9bab45ba1d87db77713b8c9b6826b663363c3 | f2773d9f828b72882ed1c4d6b3b2e539a3b3b24a | refs/heads/master | 2023-08-02T15:20:09.975771 | 2018-03-14T22:48:09 | 2018-03-14T22:48:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 74 | r | Example8.3.R | Ants.Model <- lm (Ants ~ Filling, data = SandwichAnts)
# ANOVA table for the ant-count linear model fitted above.
anova(Ants.Model)
|
7efe15ae2f19ecbe37c52b95d97087851b61765a | f21f68f0e73f76e509c4332831a08b53430d03dd | /rsrc/modalSurface.R | 01210f9429afaaf9d2e614f5a36a3b372327ca86 | [
"MIT"
] | permissive | joelfiddes/topoMAPP | 7949967d2031f6ee8e90a9fbf577e2ac3c177a1d | 1a4747034ecc56277f8b3313150e1db188f26d47 | refs/heads/master | 2021-07-12T11:34:31.904518 | 2019-02-07T15:46:06 | 2019-02-07T15:46:06 | 104,479,339 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 920 | r | modalSurface.R | #====================================================================
# SETUP
#====================================================================
#INFO
# Computes the modal surface type within each landform zone and writes the
# zonal table to landcoverZones.txt in the working directory.
# Usage: Rscript modalSurface.R <working-dir>
#DEPENDENCY
require(raster)
# NOTE(review): require() returns FALSE instead of erroring when raster is
# missing; library(raster) would fail fast.
#====================================================================
# PARAMETERS/ARGS
#====================================================================
args = commandArgs(trailingOnly=TRUE)
wd=args[1] #'/home/joel/sim/topomap_test/grid1' #
#====================================================================
# PARAMETERS FIXED
#====================================================================
#********************** SCRIPT BEGIN *******************************
setwd(wd)
#get modal surface type of each sample 0=vegetation, 1=debris, 2=steep bedrock
lc=raster('predictors/surface.tif')
zones=raster('landform.tif')
# zonal(): modal surface value per landform zone, dropping NA cells.
zoneStats=zonal(lc,zones, modal,na.rm=T)
write.table(zoneStats,'landcoverZones.txt',sep=',', row.names=F)
472fb51a1c0df4593124a1f629a5c9f976bd5fb9 | 2ede3a798b6f535fc131ae294f9fd01a7210f175 | /Day_4.R | 375f727002263d68314d29ef2ca10ef4e12dfa4a | [] | no_license | jessecolephillips/BioStats_2021 | e41251ed92ea486ecd03dfab880bdfe03ff2a0a1 | 96a29b58a0ac65d751bfe02983516c936c566ada | refs/heads/master | 2023-04-13T08:42:13.981867 | 2021-04-22T10:59:14 | 2021-04-22T10:59:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,476 | r | Day_4.R | #BioStats 2021
#Day 4
#22/04/2021
#Jesse Phillips
#Packages
library(ggplot2)
library(tidyr)
library(rcompanion)
# Confidence Intervals ----------------------------------------------------
# Calculating confidence: Bootstrapping
# Toy data set: daily steps and a rating for 26 students, by sex and teacher.
Input <- ("
 Student  Sex     Teacher  Steps  Rating
 a        female  Jacob    8000   7
 b        female  Jacob    9000  10
 c        female  Jacob   10000   9
 d        female  Jacob    7000   5
 e        female  Jacob    6000   4
 f        female  Jacob    8000   8
 g        male    Jacob    7000   6
 h        male    Jacob    5000   5
 i        male    Jacob    9000  10
 j        male    Jacob    7000   8
 k        female  Sadam    8000   7
 l        female  Sadam    9000   8
 m        female  Sadam    9000   8
 n        female  Sadam    8000   9
 o        male    Sadam    6000   5
 p        male    Sadam    8000   9
 q        male    Sadam    7000   6
 r        female  Donald  10000  10
 s        female  Donald   9000  10
 t        female  Donald   8000   8
 u        female  Donald   8000   7
 v        female  Donald   6000   7
 w        male    Donald   6000   8
 x        male    Donald   8000  10
 y        male    Donald   7000   7
 z        male    Donald   7000   7
")
data <- read.table(textConnection(Input),header = TRUE) #Reads text data into df format
summary(data)
# ungrouped data is indicated with a 1 on the right side of the formula, or the group = NULL argument.
groupwiseMean(Steps ~ 1,data = data, conf = 0.95, digits = 3)
# one-way data
# Group means of Steps by Sex with traditional 95% confidence limits.
grouped_mean_data <- groupwiseMean(Steps ~ Sex, data = data, conf = 0.95,digits = 3)
grouped_mean_data
# let's plot this data
ggplot(grouped_mean_data, aes(x = Sex, y = Mean)) +
  geom_col(col = "black", fill = "salmon", width = 0.6) +
  geom_errorbar(aes(x = Sex, ymax = Trad.upper, ymin = Trad.lower), width = 0.25) +
  theme_minimal() +
  labs(x = "", y = "Steps", title = "Mean number of steps taken by female and \nmale students")
# two-way data
# Group means of Steps by Teacher and Sex (dodged bar chart below).
g_m_dat_2 <- groupwiseMean(Steps ~ Teacher + Sex, data = data, conf = 0.95,digits = 3)
g_m_dat_2
# let's plot this data as well
ggplot(g_m_dat_2, aes(x = Sex, y = Mean, group = Teacher, fill = Teacher)) +
  geom_col(position = "dodge", col = "black", width = 0.8) +
  geom_errorbar(position = position_dodge(0.8), aes(x = Sex, ymax = Trad.upper, ymin = Trad.lower), width = 0.25) +
  theme_minimal() +
  labs(x = "", y = "Steps", title = "Mean number of steps taken by female and \nmale students") +
  scale_fill_brewer(palette = "Set1")
# by bootstrapping (randomly generating data based on data generated in experiment)
# BCa bootstrap confidence intervals instead of traditional (normal-theory) ones.
groupwiseMean(Steps ~ Sex,
              data = data,
              conf = 0.95,
              digits = 3,
              R = 10000, # number of bootstrap replicates to use for bootstrapped statistics
              boot = TRUE, # if TRUE, includes mean of the bootstrapped means
              traditional = FALSE,
              normal = FALSE,
              basic = FALSE,
              percentile = FALSE,
              bca = TRUE)
groupwiseMean(Steps ~ Teacher + Sex,
              data = data,
              conf = 0.95,
              digits = 3,
              R = 10000,
              boot = TRUE,
              traditional = FALSE,
              normal = FALSE,
              basic = FALSE,
              percentile = FALSE,
              bca = TRUE)
# ANOVA
# Two-way ANOVA of Steps on Sex and Teacher, including their interaction.
anova <- aov(Steps ~ Sex * Teacher, data = data) #shows interaction
summary(anova)
anova_Tukey <- TukeyHSD(anova)
plot(anova_Tukey)
|
3a8e16b7794f52e0cf8815cca5e121b5d1622512 | ab25a481816f258a9ae1cc5e6484fc020acae0bc | /man/loadAffyLibraries.Rd | 1edcb0441500edb7b0f4cf3f916270971dbecfc4 | [
"BSD-2-Clause-Views"
] | permissive | coreymhudson/AffyDistance | aad5ed9191c337a74dd471b7e9e183e800d3c62f | a3a0acf14348495641993664a4e68eb927d8c6bb | refs/heads/master | 2021-01-18T22:48:44.915222 | 2016-06-07T21:42:25 | 2016-06-07T21:42:25 | 32,289,280 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 491 | rd | loadAffyLibraries.Rd | \name{loadAffyLibraries}
\alias{loadAffyLibraries}
\title{
Function to load necessary libraries
}
\description{
Function to load all the libraries that this package needs.
}
\usage{
loadAffyLibraries()
}
\details{
Function to load all the libraries that this package needs.
}
\value{
None
}
\references{
}
\author{
Corey M. Hudson <coreymhudson>
}
\note{
}
\examples{
function(){
library(limma)
library(gcrma)
library(MASS)
library(gamlss)
library(stats4)
}
}
\keyword{ libraries }
|
e3480db4f20058bce91a2c8f55dfb6f2f6e86599 | 83ba9d26655808ca7c6d41177e4b51521eeb5016 | /plot3.R | f3a899346dc2e88e4e4042df9ed10a2b1567bc5e | [] | no_license | mesuarezgn/ExData_Plotting1 | 01fee4791a664d20c3b3d05e976768f51a26a5da | e763a8820fea39a06180c7c56cfd80ba8e3bfe7a | refs/heads/master | 2021-01-15T18:59:31.069073 | 2016-02-29T03:00:22 | 2016-02-29T03:00:22 | 29,084,038 | 0 | 0 | null | 2015-01-11T06:55:47 | 2015-01-11T06:55:47 | null | UTF-8 | R | false | false | 1,230 | r | plot3.R | # Loading the packages
# Plot 3: energy sub-metering (three sub-meters) over 1-2 Feb 2007,
# rendered to plot3.png (480 x 480).
library(data.table)
library(plyr)
# Loading the data
pdata <- fread("household_power_consumption.txt")
# Subsetting the data
pdata <- subset(pdata, Date == "1/2/2007" | Date == "2/2/2007")
# Using the date format
# Build a single datetime column from the separate Date and Time fields.
pdata <- mutate(pdata,
                datetime = strptime(
                  paste(pdata$Date, pdata$Time),
                  "%e/%m/%Y %H:%M:%S"
                )
)
# Using the numeric format
# Coerce the sub-meter columns to numeric via class<- (assumes the two
# selected days contain no non-numeric codes -- TODO confirm).
class(pdata$Sub_metering_1) <- "numeric"
class(pdata$Sub_metering_2) <- "numeric"
# Selecting the PNG format
png('plot3.png',width=480,height=480)
# Creating the graphic
# Draw an empty frame first (type = "n"), then overlay the three series.
with(pdata,
     plot(datetime, Sub_metering_1,
          cex.axis = 0.75,
          cex.lab = 0.75,
          type = "n",
          xlab = "",
          ylab = "Energy sub metering")
)
with(pdata, lines(datetime, Sub_metering_1))
with(pdata, lines(datetime, Sub_metering_2, col = "red"))
with(pdata, lines(datetime, Sub_metering_3, col = "blue"))
legend(
  "topright",
  col = c("black", "red", "blue"),
  cex = 0.75,
  lty = 1,
  legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
)
# Closing the graphic
dev.off()
|
7d40b5552ad44e7a3f8acb1b97b93074b4c498d2 | d320e73415f0e4886cbc5c68cf105ef946143aa6 | /man/FCFE_Basic.Rd | fec5022e7209889e45966319b334b4733bd4fce6 | [] | no_license | JancoS/EquityR | 3858bdbd6eb139de0568ad71e0556d0b7b0ce056 | 5f64126f5cd0073c8d896749d22a635cbe17f69e | refs/heads/master | 2020-03-28T23:58:08.125190 | 2018-09-18T17:17:41 | 2018-09-18T17:17:41 | 149,318,187 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 543 | rd | FCFE_Basic.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FCFE_Basic.R
\name{FCFE_Basic}
\alias{FCFE_Basic}
\title{FCFE_Basic}
\usage{
FCFE_Basic(Ticker, Req, year, growth_rate)
}
\arguments{
\item{Ticker}{Ticker of the company to be evaluated}
\item{Req}{Required rate of return}
\item{year}{Year when latest results is available}
\item{growth_rate}{The growth rate g}
}
\value{
FCFE_Value
}
\description{
Basic valuation of a company based on the FCFE. The FCFE is calculated by adjusting the FCFF by Net Borrowings
}
|
bcc79996fc0f47888647a9c6a8897ceefbf1953b | 6b85f742f818f07b1dd383dc448c41ced550bbab | /R/Combined_Model/fr_fit.R | bf1579848a47f3a0d066178e14d40353de3a2ca6 | [] | no_license | cmhoove14/Prawn_fisheries_Public_health | 3048489116fa9d0ef3e7f9d4add731d288aa1aff | 075ad4d66d339acdb27f8c1ef3139cfd8626e3f2 | refs/heads/master | 2020-05-21T04:23:12.401490 | 2019-04-28T17:47:05 | 2019-04-28T17:47:05 | 61,903,320 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,788 | r | fr_fit.R | ## Fitting alpha and Th functions based on data from Sokolow et al. 2014
#Attack rate model
# Digitised (biomass ratio, attack rate) points: V1 = prawn-to-snail biomass
# ratio, V2 = attack rate (headerless CSV).
a_points = read.csv("Data/a_points.csv", header = FALSE)
x = c(0:500)
plot(a_points[,1], a_points[,2], xlab = "Prawn-to-snail biomass ratio", ylab = "Attack rate", pch = 16,
     xlim = c(0,500))
#logarithmic function
a_model0 = nls(V2 ~ b*log(V1), data = a_points, start = list(b=1)) # Alternate model (linear with forced 0-intercept): lm(a_points[,2] ~ 0 + a_points[,1])
summary(a_model0)
lines(x, a_model0$m$getPars()[1]*log(x))
#logarithmic function with intercept
a_model = nls(V2 ~ c + b*log(V1), data = a_points, start = list(c = -log(3), b=1)) # Alternate model (linear with forced 0-intercept): lm(a_points[,2] ~ 0 + a_points[,1])
summary(a_model)
lines(x, a_model$m$getPars()[1] + a_model$m$getPars()[2]*log(x), col = 2)
#logarithmic model excluding points that fall in refuge biomass ratio (~6)
a2 = subset(a_points, V1 >= 3)
a_model2 = nls(V2 ~ b*log(V1) - c, data = a2, start = list(b=1, c = 3)) # Alternate model (linear with forced 0-intercept): lm(a_points[,2] ~ 0 + a_points[,1])
summary(a_model2)
lines(x, a_model2$m$getPars()[1]*log(x) - a_model2$m$getPars()[2], col = 3)
#linear model
a_model3 = nls(V2 ~ a+b*V1, data = a_points, start = list(a = 0, b=1)) # Alternate model (linear with forced 0-intercept): lm(a_points[,2] ~ 0 + a_points[,1])
summary(a_model3)
lines(x, a_model3$m$getPars()[1] + a_model3$m$getPars()[2]*x, col = 4)
# Compare the four candidate attack-rate fits by AIC.
AIC(a_model0, a_model, a_model2, a_model3)
#Model 2 provides the best fit
#handling time model
# Handling time modelled as inversely proportional to the biomass ratio.
th_points = read.csv("Data/th_points.csv", header = FALSE)
plot(th_points[,1], th_points[,2], xlab = "Prawn-to-snail biomass ratio", ylab = "Handling time", pch = 16)
th_model = nls(V2 ~ 1/(c*V1), data = th_points, start = list(c=1))
summary(th_model)
lines(x, 1/(th_model$m$getPars()*x))
#Test per capita consumption rate given prawn-snail biomass ratio #########
n_dens = c(0:50)
# Per-prawn snail consumption rate (Holling functional response).
#   bmr: prawn-to-snail biomass ratio (sets attack rate and handling time
#        via the fitted a_model0 / th_model)
#   n:   snail density
#   ex:  exponent on density (1 = Holling type II, >1 = type III)
#   pen: penalty divisor applied to the raw rate
a_rate <- function(bmr, n, ex, pen){
  ratio.df <- data.frame(V1 = bmr)
  attack <- predict(a_model0, newdata = ratio.df)
  handling <- predict(th_model, newdata = ratio.df)
  raw.rate <- (attack * n^ex) / (1 + attack * handling * n^ex)
  raw.rate / pen
}
# Sweep consumption rate over snail densities 0..50 for three biomass
# ratios (100 = black, 50 = red, 200 = blue in the default palette).
plot(n_dens, sapply(n_dens, a_rate, bmr = 100, ex = 1, pen = 10), type = 'l', ylim = c(0,30),
     xlab = "Snail density", ylab = "Snails consumed per prawn per day",
     main = "Consumption rate per prawn, Holling's II")
lines(n_dens, sapply(n_dens, a_rate, bmr = 50, ex = 1, pen = 10), col = 2)
lines(n_dens, sapply(n_dens, a_rate, bmr = 200, ex = 1, pen = 10), col = 4)
# Same sweep with ex = 1.5 (sigmoidal type III response).
plot(n_dens, sapply(n_dens, a_rate, bmr = 100, ex = 1.5, pen = 10), type = 'l', ylim = c(0,30),
     xlab = "Snail density", ylab = "Snails consumed per prawn per day",
     main = "Consumption rate per prawn, Holling's III")
lines(n_dens, sapply(n_dens, a_rate, bmr = 50, ex = 1.5, pen = 10), col = 2)
lines(n_dens, sapply(n_dens, a_rate, bmr = 200, ex = 1.5, pen = 10), col = 4)
#Test with multiple size classes #########
# Per-class consumption rates for three snail size classes sharing one
# predator: class i gets (a_i * n_i^ex) / (1 + a_i * Th_i * N^ex) / pen,
# where N = n1 + n2 + n3 is the total snail density.
a_rate2 <- function(n1, n2, n3, bm1, bm2, bm3, ex, pen){
  dens <- c(n1, n2, n3)
  # predict() is vectorised over newdata, so evaluate all three biomass
  # ratios with a single call per fitted model.
  ratios <- data.frame(V1 = c(bm1, bm2, bm3))
  attack <- predict(a_model0, newdata = ratios)
  handling <- predict(th_model, newdata = ratios)
  total.dens <- n1 + n2 + n3
  rates <- (attack * dens^ex) / (1 + attack * handling * total.dens^ex) / pen
  as.numeric(rates)
}
# Sanity check: with a common biomass ratio (100) the three class rates
# should sum to the single-pool rate at total density 30 + 10 + 5 = 45.
sum(a_rate2(30,10,5,100,100,100,1,10))
a_rate(100, 45,1,10)
# Consumption rates hold if biomass ratio is consistent across different snail classes
#But how should it perform with different biomass ratios and densities
a_rate2(30,10,5,200,100,50,1,10)
sum(a_rate2(30,10,5,200,100,50,1,10))
# Distribute predation between classes logically
# Per-class consumption rates where predation is distributed across the
# three size classes through one shared saturation term: unlike a_rate2(),
# the a_i*Th_i*n_i terms are summed *before* the exponent is applied.
#   n1..n3:   densities of the three snail size classes
#   bm1..bm3: prawn-to-snail biomass ratio for each class
#   ex:       Holling exponent (1 = type II)
#   pen:      penalty divisor applied to the raw rates
a_rate3 <- function(n1, n2, n3, bm1, bm2, bm3, ex, pen){
  a1 = predict(a_model0, newdata = data.frame(V1 = bm1))
  Th1 = predict(th_model, newdata = data.frame(V1 = bm1))
  a2 = predict(a_model0, newdata = data.frame(V1 = bm2))
  Th2 = predict(th_model, newdata = data.frame(V1 = bm2))
  a3 = predict(a_model0, newdata = data.frame(V1 = bm3))
  Th3 = predict(th_model, newdata = data.frame(V1 = bm3))
  # Hoist the shared denominator: the original rebuilt the identical
  # sum(...)^ex expression three times.
  denom <- 1 + sum(c(a1*Th1*n1, a2*Th2*n2, a3*Th3*n3))^ex
  c1 <- (a1*n1^ex)/denom/pen
  c2 <- (a2*n2^ex)/denom/pen
  c3 <- (a3*n3^ex)/denom/pen
  return(c(c1, c2, c3))
}
a_rate3(30,10,5,200,100,50,1,10)
sum(a_rate3(30,10,5,200,100,50,1,10))
|
bd0e386d6435c3a44faadc5012d1d57012a603ca | e161195a09e161f978e8610a345bd8320806a692 | /R/loglikihgcov.R | 829c71dda38304f7b0c1b79a9e50d4711943c945 | [] | no_license | cran/CUB | 835be9f64528f974025d8daaff7cc1f99f2eae1a | 7c47f960512aa90db261ba9ed41006a191440c1a | refs/heads/master | 2020-04-06T21:07:58.688216 | 2020-03-31T14:30:19 | 2020-03-31T14:30:19 | 48,078,715 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 791 | r | loglikihgcov.R | #' @title Log-likelihood function for the IHG model with covariates
#' @aliases loglikihgcov
#' @description Compute the log-likelihood function for the IHG model
#' with covariates to explain the preference parameter.
#' @usage loglikihgcov(m, ordinal, U, nu)
#' @keywords internal
#' @seealso loglikIHG
#' @param m Number of ordinal categories
#' @param ordinal Vector of ordinal responses
#' @param U Matrix of selected covariates for explaining the preference parameter
#' @param nu Vector of coefficients for covariates, whose length equals NCOL(U)+1 to include
#' an intercept term in the model (first entry of nu)
loglikihgcov <-
function(m,ordinal,U,nu){
if (is.factor(ordinal)){
ordinal<-unclass(ordinal)
}
U<-as.matrix(U)
sum(log(probihgcovn(m,ordinal,U,nu)))
}
|
ac39a4f030ed12c7cf5e72895c2ce3488a634648 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/coefplot/examples/extract.coef.glm.Rd.R | fd81c5d479830d8dd9fa5db3b35c7ba31529f2b6 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 336 | r | extract.coef.glm.Rd.R | library(coefplot)
### Name: extract.coef.glm
### Title: extract.coef.glm
### Aliases: extract.coef.glm
### ** Examples
## Not run:
##D require(ggplot2)
##D data(diamonds)
##D library(coefplot)
##D mod2 <- glm(price > 10000 ~ carat + cut + x, data=diamonds, family=binomial(link="logit"))
##D extract.coef(mod2)
## End(Not run)
|
c5efe00081206b7f47f28ae894e23e1bacbc445d | 94f13e24d28c676532080db7e06617525ce18cf1 | /read_files.R | 3f88be23630a59216797c5f679812035edf0c3b7 | [] | no_license | Kita2015/basic-scripts | f0e19b5b99dd3d4757216e5353e29f5def81a400 | 9f79e78c3d42b2e975d4e0ee6f727ba63895ad77 | refs/heads/master | 2020-03-07T18:44:05.548142 | 2018-04-01T18:07:30 | 2018-04-01T18:07:30 | 127,650,158 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 641 | r | read_files.R | #read file from Dutch Excel
as10_ds06 <- read.csv2("\\\\alt.rivm.nl/users/home/blokhuic/Documents/Data-analyse_airsenseur/Data/as10_fixed_copy/DataSet_000006.csv_fixed.txt", sep = ";", dec = ",", header=FALSE, stringsAsFactors = FALSE, quote="")
#assign names to columns raw data
colnames(as10_ds06) <- c("Unixdate","Sensor","Variabele","V4","V5","Waarde","Unix2","Longitude","Latitude","Height")
#export cleaned dataframe with dots for decimal sign
write.table(as10_ds06, file ="\\\\alt.rivm.nl/users/home/blokhuic/Documents/Data-analyse_airsenseur/Data_cleaned/as10_cleaned/as10_ds06.csv",row.names=FALSE,col.names=TRUE,sep=";",dec=".")
|
0c2be12bdb147bff31b97c38408cbf10e38e2c6d | b60483cd3af69ebcb1a07dcee6486644896ca327 | /R/scores.R | 80a4a893eaa6296e145bfe638aab12fe6132388d | [] | no_license | cran/EMJumpDiffusion | 98e85ff7b06d6ae67c88598693373a7237ea5c67 | 418e3b59dd91070d2fe07e25dc765bc2daab7e41 | refs/heads/master | 2020-12-25T19:26:24.285948 | 2009-05-05T00:00:00 | 2009-05-05T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 494 | r | scores.R | `scores` <-
function(org, calc)
{
hit<-0
miss<-0
total<-0
toomuch<-0
T<-length(org)
for (t in 2:(T-1))
{
if(org[t]==1)
{
total<-total+1
if(calc[t]==1)
{
hit<-hit+1
}
else
{
miss<-miss+1
}
}
else
{
if(calc[t]==1)
{
toomuch<-toomuch+1
}
}
}
cat("Comparison of original jumps with estimated jumps \n")
cat("Total jumps:\t\t", total, "\n")
cat("Discovered jumps:\t", hit, "\n")
cat("Missed jumps:\t\t", miss, "\n")
cat("Additional jumps:\t", toomuch, "\n")
}
|
e282b70f191b0679d4dacf8060ef39485beffba8 | 9491ed950b0d8d8774d49e1865652c7267ea885a | /R/vis.r | 1cb55e42773a51b6b2d1affb6a4c116811560ef8 | [] | no_license | vlpb3/faradr | 722a294359bee23722117bb1e89feab1fc9e30e0 | 1bde734c6c6211ddf68bdc58318b57d3091112bb | refs/heads/master | 2021-01-20T10:54:25.687362 | 2016-08-16T13:06:42 | 2016-08-16T13:06:42 | 18,512,476 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 23,335 | r | vis.r | #' Plot aligned reads over annotated region.
#' @param aln read alignments in GAlignments objet (GenomicTRanges)
#' @param annots genome annotations in GRanges object
#' @param plotfile path for output plot
#' @importFrom ggbio autoplot tracks
#' @importFrom ggplot2 aes
#' @export
PlotAlignAnnot <- function(aln, annots, plotfile) {
reads.track <- autoplot(aln)
annots.track <- autoplot(annots,
aes(color=group, fill=group))
png(filename=plotfile,
width=1024, height=512)
print(tracks(alignments=reads.track,
annotation=annots.track,
heights=c(5,1)))
dev.off()
}
#' Plot aligned reads over annotated region.
#' @param aln read alignments in GAlignments objet (GenomicTRanges)
#' @param annots genome annotations in GRanges object
#' @param plotfile path for output plot
#' @importFrom ggbio autoplot tracks
#' @importFrom ggplot2 aes
#' @importFrom IRanges coverage
#' @export
PlotCoverageAnnot <- function(aln, annots, plotfile) {
cov <- coverage(aln)
coverage.track <- autoplot(cov, binwidth=10)
annots.track <- autoplot(annots,
aes(color=group, fill=group))
png(filename=plotfile,
width=1024, height=512)
print(tracks(coverage=coverage.track,
annotation=annots.track,
heights=c(5,1)))
dev.off()
}
#' Plot quality scores per cycle
#' @param data.dir string path to data
#' @param file.pattern string pattern for input files
#' @return plot object
#' @importFrom ShortRead qa
#' @importFrom ggplot2 ggplot geom_boxplot
PlotPerCycleQuality <- function(data.dir, file.pattern) {
input.type <- "fastq"
sreadq.qa <- qa(data.dir, file.pattern, type=input.type)
perCycle <- sreadq.qa[["perCycle"]]
perCycle.q <- perCycle$quality
pcq <- perCycle.q[rep(seq_len(nrow(perCycle.q)), perCycle.q$Count), ]
p <- ggplot(pcq, aes(factor(Cycle), Score))
p <- p + geom_boxplot()
return(p)
}
#' Plot mean quality per read
#' @param data.dir path to data dir
#' @param file.pattern pattern of input fastq
#' @return plot object
PlotMeanReadQuality <- function(data.dir, file.pattern) {
count.qmeans <- function(s, data.dir) {
fq <- yield(FastqSampler(file.path(data.dir, s), n=1000000))
qm <- as(quality(fq), "matrix")
row.means <- rowMeans(qm, na.rm=T)
qmeans <- data.frame(mean.qual=row.means, lane=rep(s, length(row.means)))
return(qmeans)
}
samples <- list.files(data.dir, file.pattern)
qmeans <- adply(samples, 1, count.qmeans, data.dir=data.dir)
p <- ggplot(qmeans, aes(x=mean.qual))
p <- p + geom_histogram(aes(y=..density..),
alpha=.1,
fill="green",
colour="darkgreen",
binwidth=.5)
p <- p + geom_density() + facet_wrap(~lane)
return(p)
}
#' Plot read length distribution
#' @param data.dir path to data dir
#' @param file.pattern pattern of input fastq
#' @return plot object
PlotReadLengthDistribution <- function(data.dir, file.pattern) {
samples <- list.files(data.dir, file.pattern)
count.rlens <- function(s, data.dir) {
fq <- yield(FastqSampler(file.path(data.dir, s), n=1000000))
rlens <- width(fq)
rlen95 <- quantile(width(fq), .95)
rlens <- rlens[rlens <= rlen95]
rlens.df <- data.frame(read.len=rlens, lane=rep(s, length(rlens)))
return(rlens.df)
}
rlens <- adply(samples, 1, count.rlens, data.dir=data.dir)
p <- ggplot(rlens, aes(x=read.len))
p <- p + geom_histogram(aes(y=..density..),
alpha=0.02,
fill="green",
colour="darkgreen",
binwidth=1)
p <- p + geom_density(adjust=3) + facet_wrap(~lane)
return(p)
}
#' Plot number of reads in each sample.
#'
#' Bar plot representing number of reads in each sample.
#' Samples are expected to be separate fastq files.
#' @param fqc FastQA from package ShortRead
#' @return plot object
#' @importFrom ggplot2 ggplot geom_bar theme labs
#' @export
Aplot <- function(fqc) {
df <- data.frame(nReads=fqc[['readCounts']]$read,
sample=row.names(fqc[['readCounts']]))
p <- ggplot(df, aes(x=sample, y=nReads, fill=sample))
p <- p + geom_bar(alpha=0.6, stat='identity')
p <- p + theme(legend.position="none", axis.text.x=element_text(angle=45, hjust=1))
p <- p + labs(x="Sample", y="Number of Reads")
return(p)
}
#' Plot number of reads in each sample for each grouping factor.
#'
#' Bar plots representing number of reads in each sample.
#' There is one bar plot per grouping factor.
#' Samples are grouped and clored by grouping factor.
#' Grouping factors are extracted from design table.
#' Samples are expected to be separate fastq files.
#' @param fqc FastQA from package ShortRead
#' @param design.table data.frame holds information about experimantal design
#' @return list of plot objects
#' @importFrom ggplot2 ggplot geom_bar theme labs
#' @importFrom stringr str_replace
#' @export
A.design.plot <- function(fqc, design.table) {
# get names of groups from desing file
groups <- names(design.table)
groups <- groups[groups != "sampleid"]
df <- data.frame(nReads=fqc[['readCounts']]$read,
sampleid=str_replace(row.names(fqc[['readCounts']]), "\\.f(ast)?q", ""))
df <- merge(df, design.table)
plots <- lapply(groups, function(g.factor) {
# order by grouping factor
df <- df[order(df[g.factor]), ]
.e <- environment()
p <- ggplot(df, aes(x=sampleid, y=nReads, fill=factor(df[,g.factor])),
environment=.e)
p <- p + geom_bar(alpha=0.6, stat='identity')
p <- p + theme(axis.text.x=element_text(angle=45, hjust=1))
p <- p + guides(fill=guide_legend(title=g.factor))
p <- p + labs(x="Sample", y="Number of Reads")
return(p)
})
names(plots) <- groups
return(plots)
}
#' Plot read lenght distribution
#'
#' Density plot representing freaquencies or readlenths.
#' Samples are explected to be separate fastq files.
#' @param samples ShortReadQ object from package ShortRead
#' @return plot object
#' @importFrom ggplot2 ggplot geom_density labs
#' @export
B1plot <- function(samples) {
rlens <- lapply(samples, width)
df <- data.frame(sample=rep(names(samples), lapply(rlens, length)),
rlen=unlist(rlens), row.names=NULL)
q95rlen <- quantile(df$rlen, 0.95)
p <- ggplot(df, aes(rlen, group=sample, colour=sample))
p <- p + geom_density(alpha=I(0.4), adjust=3) + xlim(0, q95rlen)
p <- p + labs(x="Read Length", y="Fraction of Reads", colour="Sample")
}
#' Plot read lenght distribution
#'
#' Density plot representing freaquencies or readlenths.
#' There is one plot per grouping factor.
#' Samples clored by grouping factor.
#' Grouping factors are extracted from design.table.
#' Samples are explected to be separate fastq files.
#' @param samples ShortReadQ object from package ShortRead
#' @param design.table data.frame holds information about experimantal design
#' @return list of plot objects
#' @importFrom ggplot2 ggplot geom_density labs
#' @importFrom stringr str_replace
#' @export
B1.design.plot <- function(samples, design.table) {
groups <- names(design.table)
groups <- groups[groups != "sampleid"]
rlens <- lapply(samples, width)
df <- data.frame(sampleid=rep(names(samples), lapply(rlens, length)),
rlen=unlist(rlens), row.names=NULL)
df <- merge(df, design.table)
q95rlen <- quantile(df$rlen, 0.95)
plots <- lapply(groups, function(g.factor){
.e <- environment()
p <- ggplot(df, aes(rlen, group=sampleid, colour=factor(df[,g.factor])),
environment=.e)
p <- p + geom_density(alpha=I(0.4), adjust=3) + xlim(0, q95rlen)
p <- p + guides(colour=guide_legend(title=g.factor))
p <- p + labs(x="Read Length", y="Fraction of Reads")
})
names(plots) <- groups
return(plots)
}
#' Plot fraction of reads with particular lengh or longer.
#'
#' Line plot showing fraction of reads on y axis
#' and minimal read length on x axis.
#' One line per sample.
#' Samples are explected to be separage fastq files.
#' @param samples ShortReadQ object from package ShortRead
#' @return plot object
#' @importFrom ggplot2 ggplot geom_line xlim labs
#' @import dplyr
#' @export
B2plot <- function(samples) {
rlens <- lapply(samples, width)
df <- data.frame(sampleid=rep(names(rlens), lapply(rlens, length)),
rlen=unlist(rlens), row.names=NULL)
q95rlen <- quantile(df$rlen, 0.95)
df <- df %>% group_by(sampleid, rlen) %>% summarise(count=n())
df <- df %>% group_by(sampleid) %>% mutate(cum.count=cumsum(count),
frac.count=cum.count/sum(count))
p <- ggplot(df, aes(group=sampleid, colour=sampleid ))
p <- p + geom_line(aes(rlen, 1-frac.count), alpha=0.4)
p <- p + xlim(min(df$rlen), q95rlen)
p <- p + labs(x="Read Length", y="Fraction of Reads", colour="Sampleid")
}
#' Plot fraction of reads with particular lengh or longer.
#'
#' Line plot showing fraction of reads on y axis
#' and minimal read length on x axis.
#' One line per sample.
#' There is one plot per grouping factor.
#' Samples clored by grouping factor.
#' Grouping factors are extracted from design.table.
#' Samples are explected to be separage fastq files.
#' @param samples ShortReadQ object from package ShortRead
#' @param design.table data.frame holds information about experimantal design
#' @return plots list of plot objects
#' @importFrom ggplot2 ggplot geom_line xlim labs
#' @import dplyr
#' @export
B2.design.plot <- function(samples, design.table) {
groups <- names(design.table)
groups <- groups[groups != "sampleid"]
rlens <- lapply(samples, width)
df <- data.frame(sampleid=rep(names(rlens), lapply(rlens, length)),
rlen=unlist(rlens), row.names=NULL)
q95rlen <- quantile(df$rlen, 0.95)
df <- df %>% group_by(sampleid, rlen) %>% summarise(count=n())
df <- df %>% group_by(sampleid) %>% mutate(cum.count=cumsum(count),
frac.count=cum.count/sum(count))
df <- merge(df,design.table)
plots <- lapply(groups, function(g.factor){
.e <- environment()
p <- ggplot(df, aes(group=sampleid, colour=factor(df[ ,g.factor])),
environment=.e)
p <- p + geom_line(aes(rlen, 1-frac.count), alpha=0.4)
p <- p + xlim(min(df$rlen), q95rlen)
p <- p + guides(colour=guide_legend(title=g.factor))
p <- p + labs(x="Read Length", y="Fraction of Reads", colour="Sampleid")
})
names(plots) <- groups
return(plots)
}
#' Plot mean read quality distribution per sample.
#'
#' Boxplot showing the distribution of mean read quality.
#' One boxplot per sample.
#' Samples are explected to be separage fastq files.
#' @param samples ShortReadQ object from package ShortRead
#' @return plot object
#' @importFrom ggplot2 ggplot geom_boxplot theme labs
#' @export
C1plot <- function(samples) {
calc.qmeans <- function(fq) {
qm <- as(quality(fq), "matrix")
row.means <- rowMeans(qm, na.rm=T)
return(row.means)
}
qmeans <- lapply(samples, calc.qmeans)
df <- data.frame(sample=rep(names(qmeans), lapply(qmeans, length)),
qmeans=unlist(qmeans), row.names=NULL)
p <- ggplot(df, aes(factor(sample), qmeans, fill=sample))
p <- p + geom_boxplot(alpha=0.6, outlier.size=0)
p <- p + theme(legend.position="none", axis.text.x=element_text(angle=45, hjust=1))
p <- p + labs(x="Sample", y="Mean Read Quality")
}
#' Plot mean read quality distribution per sample.
#'
#' Boxplot showing the distribution of mean read quality.
#' One boxplot per sample.
#' There is one plot per grouping factor.
#' Samples clored by grouping factor.
#' Grouping factors are extracted from design.table.
#' Samples are explected to be separage fastq files.
#' @param samples ShortReadQ object from package ShortRead
#' @param design.table data.frame holds information about experimantal design
#' @return list of plot objects
#' @importFrom ggplot2 ggplot geom_boxplot theme labs
#' @export
C1.design.plot <- function(samples, design.table) {
groups <- names(design.table)
groups <- groups[groups != "sampleid"]
calc.qmeans <- function(fq) {
qm <- as(quality(fq), "matrix")
row.means <- rowMeans(qm, na.rm=T)
return(row.means)
}
qmeans <- lapply(samples, calc.qmeans)
df <- data.frame(sampleid=rep(names(qmeans), lapply(qmeans, length)),
qmeans=unlist(qmeans), row.names=NULL)
df <- merge(df, design.table)
plots <- lapply(groups, function(g.factor) {
df <- df[order(df[g.factor]), ]
.e <- environment()
p <- ggplot(df, aes(factor(sampleid), qmeans, fill=factor(df[ ,g.factor])),
environment=.e)
p <- p + geom_boxplot(alpha=0.6, outlier.size=0)
p <- p + theme(axis.text.x=element_text(angle=45, hjust=1))
p <- p + guides(fill=guide_legend(title=g.factor))
p <- p + labs(x="Sampleid", y="Mean Read Quality")
})
names(plots) <- groups
return(plots)
}
#' Plot mean base quality at particualar position in the read.
#'
#' Line plot showing mean quality of the bases per position in the read.
#' One line per sample.
#' Samples are explected to be separage fastq files.
#' @param samples ShortReadQ object from package ShortRead
#' @return plot object
#' @import dplyr
#' @importFrom ggplot2 ggplot geom_line xlim labs
#' @export
C2plot <- function(samples) {
calc.qmeans <- function(fq) {
qm <- as(quality(fq), "matrix")
col.means <- colMeans(qm, na.rm=T)
}
q95rlen <- quantile(unlist(sapply(samples, width)), 0.95)
qmeans <- lapply(samples, calc.qmeans)
df <- data.frame(sampleid=rep(names(qmeans), lapply(qmeans, length)),
qmeans=unlist(qmeans), row.names=NULL)
df <- df %>% group_by(sampleid) %>% mutate(ones=1, pos=cumsum(ones))
p <- ggplot(df, aes(pos, qmeans, group=sampleid, colour=sampleid))
p <- p + geom_line(alpha=0.4) + xlim(0, q95rlen)
p <- p + labs(x="Position in the Read", y="Mean Base Quality", colour="Sampleid")
}
#' Plot mean base quality at particualar position in the read.
#'
#' Line plot showing mean quality of the bases per position in the read.
#' One line per sample.
#' Samples are explected to be separage fastq files.
#' There is one plot per grouping factor.
#' Samples colored by grouping factor.
#' Grouping factors are extracted from design.table.
#' @param samples ShortReadQ object from package ShortRead
#' @param design.table data.frame holds information about experimantal design
#' @return list of plot objects
#' @import dplyr
#' @importFrom ggplot2 ggplot geom_line xlim labs
#' @export
C2.design.plot <- function(samples, design.table) {
groups <- names(design.table)
groups <- groups[groups != "sampleid"]
calc.qmeans <- function(fq) {
qm <- as(quality(fq), "matrix")
col.means <- colMeans(qm, na.rm=T)
}
q95rlen <- quantile(unlist(sapply(samples, width)), 0.95)
qmeans <- lapply(samples, calc.qmeans)
df <- data.frame(sampleid=rep(names(qmeans), lapply(qmeans, length)),
qmeans=unlist(qmeans), row.names=NULL)
df <- df %>% group_by(sampleid) %>% mutate(ones=1, pos=cumsum(ones))
df <- merge(df, design.table)
plots <- lapply(groups, function(g.factor){
.e <- environment()
p <- ggplot(df, aes(pos, qmeans, group=sampleid, colour=factor(df[ ,g.factor])),
environment=.e)
p <- p + geom_line(alpha=0.4) + xlim(0, q95rlen)
p <- p + guides(colour=guide_legend(title=g.factor))
p <- p + labs(x="Position in the Read", y="Mean Base Quality")
})
names(plots) <- groups
return(plots)
}
#' Plot fraction of reads with partucular quality or higher per position.
#'
#' 4 line plots, for qualities 18, 20, 24 and 28, showing fraction
#' of reads with one of those qualities per position in the read.
#' One line per sample.
#' Samples are explected to be separage fastq files.
#' There is one plot per grouping factor.
#' Samples colored by grouping factor.
#' Grouping factors are extracted from design.table.
#' @param samples ShortReadQ object from package ShortRead
#' @param fqc FastQA from package ShortRead
#' @param design.table data.frame holds information about experimantal design
#' @return list of plot objects
#' @import dplyr
#' @importFrom ggplot2 ggplot geom_line facet_wrap ylim labs
#' @export
C3.design.plot <- function(samples, fqc, design.table) {
groups <- names(design.table)
groups <- groups[groups != "sampleid"]
border.quals <- c(18, 20, 24, 28)
q95rlen <- quantile(unlist(sapply(samples, width)), 0.95)
pcq <- fqc[["perCycle"]]$quality
pcq$sampleid <- str_replace(pcq$lane, "\\.f(ast)?q", "")
pcq <- merge(pcq, design.table, by='sampleid')
pcq <- pcq %>% group_by(sampleid, Cycle) %>% mutate(CycleCounts=sum(Count),
CountFrac=Count/CycleCounts,
ScoreCumSum=cumsum(CountFrac)) %>% ungroup()
pcq <- pcq[pcq$Cycle <= q95rlen, ]
subpcq <- pcq[pcq$Score %in% border.quals, ]
plots <- lapply(c(groups), function(g.factor){
.e <- environment()
p <- ggplot(subpcq, environment=.e)
p <- p + geom_line(aes_string(x='Cycle', y='1 - ScoreCumSum',
group='sampleid', colour=g.factor),
alpha=I(0.4))
p <- p + facet_wrap(~Score) + ylim(0,1)
p <- p + guides(colour=guide_legend(title=g.factor))
p <- p + labs(x="Position in the Read", y="Fraction of Reads")
})
names(plots) <- groups
return(plots)
}
#' Plot fraction of reads with partucular quality or higher per position.
#'
#' 4 line plots, for qualities 18, 20, 24 and 28, showing fraction
#' of reads with one of those qualities per position in the read.
#' One line per sample.
#' Samples are explected to be separage fastq files.
#' @param fqc FastQA from package ShortRead
#' @param samples ShortReadQ object from package ShortRead
#' @return plot object
#' @import dplyr
#' @importFrom ggplot2 ggplot geom_line facet_wrap ylim labs
#' @export
C3plot <- function(fqc, samples) {
border.quals <- c(18, 20, 24, 28)
q95rlen <- quantile(unlist(sapply(samples, width)), 0.95)
pcq <- fqc[["perCycle"]]$quality
pcq$sampleid <- str_replace(pcq$lane, "\\.f(ast)?q", "")
pcq <- pcq %>% group_by(sampleid, Cycle) %>% mutate(CycleCounts=sum(Count),
CountFrac=Count/CycleCounts,
ScoreCumSum=cumsum(CountFrac)) %>% ungroup()
pcq <- pcq[pcq$Cycle <= q95rlen, ]
subpcq <- pcq[pcq$Score %in% border.quals, ]
p <- ggplot(subpcq)
p <- p + geom_line(aes(Cycle, 1-ScoreCumSum, group=sampleid, colour=sampleid), alpha=I(0.4))
p <- p + facet_wrap(~Score) + ylim(0,1)
p <- p + labs(x="Position in the Read", y="Fraction of Reads", colour="Sampleid")
}
#' Plot base frequnecy for first 30 nt.
#'
#' Linepolots representing frequencies of bases, one line per base,
#' for first 30 nt in the read (from 5')
#' One plot per sample.
#' Each sample is a fastq files.
#' @param samples ShortReadQ object from package ShortRead
#' @return plot object
#' @importFrom reshape2 melt
#' @importFrom ggplot2 ggplot geom_line scale_x_discrete facet_wrap labs
#' @importFrom ggplot2 element_rect
#' @import dplyr
#' @export
D1plot <- function(samples) {
countLetterFreq <- function(fq) {
bases <- c("A", "C", "G", "T")
sr <- sread(fq)
alpha.freq <- alphabetByCycle(sr, bases)[, 1:30]
alpha.freq <- data.frame(alpha.freq)
colnames(alpha.freq) <- 1:30
alpha.freq$base <- rownames(alpha.freq)
df <- melt(alpha.freq, id=c("base"))
colnames(df) <- c("base", "pos", "count")
df <- df %>% group_by(pos) %>% mutate(total.count=sum(count),
frac.count=count/total.count)
# df <- ddply(df, .(pos), mutate,
# total.count=sum(count),
# frac.count=count/total.count)
return(df)
}
df.list <- lapply(samples, countLetterFreq)
df <- do.call("rbind", df.list)
df$sampleid <- rep(names(samples),each=4*30)
# df$sampleid <-sapply(rownames(df),
# function(rn) {str_split(rn, "\\.")[[1]][1]},
# simplify=TRUE, USE.NAMES=FALSE)
p <- ggplot(df, aes(pos, frac.count, group=base, colour=base))
p <- p + geom_line(alpha=0.4) + scale_x_discrete(breaks=seq(0, 30, 5))
p <- p + ylim(0,1)
p <- p + facet_wrap(~sampleid)
p <- p + labs(x="Position in the Read", y="Base Frequency", colour="Base")
p <- p + theme(panel.background=element_rect(fill="white", colour="grey"))
}
#' Plot base frequnecy for last 30 nt.
#'
#' Lineplots representing frequencies of bases, one line per base,
#' for last 30 nt in the read (from 3')
#' One plot per sample.
#' Each sample is a fastq files.
#' @param samples ShortReadQ object from package ShortRead
#' @return plot object
#' @importFrom reshape2 melt
#' @importFrom ggplot2 ggplot geom_line scale_x_discrete facet_wrap labs
#' @importFrom ggplot2 element_rect
#' @import dplyr
#' @export
D2plot <- function(samples) {
countLetterFreq <- function(fq) {
sr <- reverse(sread(fq))
bases <- c("A", "C", "G", "T")
alpha.freq <- alphabetByCycle(sr, bases)[, 1:30]
alpha.freq <- data.frame(alpha.freq)
colnames(alpha.freq) <- -1:-30
alpha.freq$base <- rownames(alpha.freq)
df <- melt(alpha.freq, id=c("base"))
colnames(df) <- c("base", "pos", "count")
df <- df %>% group_by(pos) %>% mutate(total.count=sum(count),
frac.count=count/total.count)
return(df)
}
df.list <- lapply(samples, countLetterFreq)
df <- do.call("rbind", df.list)
df$sampleid <- rep(names(samples),each=4*30)
# df$sampleid <-sapply(rownames(df),
# function(rn) {str_split(rn, "\\.")[[1]][1]},
# simplify=TRUE, USE.NAMES=FALSE)
p <- ggplot(df, aes(pos, frac.count, group=base, colour=base))
p <- p + geom_line(alpha=0.4) + scale_x_discrete(breaks=seq(-30, 0, 5))
p <- p + ylim(0,1)
p <- p + facet_wrap(~sampleid)
p <- p + labs(x="Position in the Read", y="Base Frequency", colour="Base")
p <- p + theme(panel.background=element_rect(fill="white", colour="white"))
}
|
b0a6ca47f33aedfd2f849fe1e816e4986033733c | 202e50539b75772f097e2df774bba7357724f233 | /Tarea_D/Tarea_D.R | 42d90b407c647079425cf43ac44a431531447a53 | [] | no_license | MickAmest/Tareas-BioinfRepro2020_MickAmest | 31dca19befdf4a0cdbf0159e879e6066b0fe1e68 | 66981c093e53c5171c132c56f2d38259a3524d86 | refs/heads/master | 2020-12-14T15:39:41.231361 | 2020-06-04T19:57:14 | 2020-06-04T19:57:14 | 234,791,639 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 347 | r | Tarea_D.R | ### Tarea D. Script to plot iris data on separete boxes.
### Miguel Amaro, Mar 2020
library(ggplot2)
## See data
head(iris)
## Plot
ggplot(data=iris, aes(x= Sepal.Length, y=Sepal.Width)) +
geom_point(aes(color=Species)) +
facet_grid(cols = vars(Species)) + ## Vertical panels, by species.
labs(x="Largo del sépalo", y="Ancho del sépalo")
|
0d17d50fbacdb375ff0daa238d63dc592aad86f4 | 914f3d50eb45e209b7fa64c753dbb7ce162360a0 | /99_ect/using_github_source_code.R | b1282a988d631b290bd4b5b6546e4fbf9b6deff2 | [] | no_license | snapbuy/R_edu | afbbe86670f28ee6a5fb421b52797a4d2c7f4f54 | 39080dc55f48503778ae27e0890e89d9daf12788 | refs/heads/master | 2023-08-17T15:50:27.921589 | 2021-09-15T16:19:16 | 2021-09-15T16:19:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 415 | r | using_github_source_code.R | source_from_github = function(url){
library("RCurl")
url = gsub(pattern = "//github", replacement = "//raw.githubusercontent", x = url)
url = gsub(pattern = "/blob/", replacement = "/", x = url)
script = getURL(url, ssl.verifypeer = FALSE)
eval(parse(text = script))
}
url_mine = "https://github.com/encaion/R_edu/blob/master/30_visualization/ggplot/statVis/stat_vis_01.R"
source_from_github(url_mine)
|
0cb88c6aadaac9620efd29934d91ba0fde925176 | f48e25ade098aef7aa6f9fde4927bbf2b2092d14 | /man/dasl.tuition_all_schools_2016.Rd | fb4f8cb937018c7dbb1e9b799fb7b345216c9bbd | [] | no_license | sigbertklinke/mmstat.data | 23fa7000d5a3f776daec8b96e54010d85515dc7d | 90f698e09b4aac87329b0254db28d835014c5ecb | refs/heads/master | 2020-08-18T04:29:05.613265 | 2019-10-17T11:44:57 | 2019-10-17T11:44:57 | 215,747,280 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 659 | rd | dasl.tuition_all_schools_2016.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dasl.R
\docType{data}
\name{dasl.tuition_all_schools_2016}
\alias{dasl.tuition_all_schools_2016}
\title{Tuition All Schools 2016}
\format{7703 observations}
\source{
DASL – The Data And Story Library: \href{https://dasl.datadescription.com/datafile/tuition-all-schools-2016/?sf_paged=40}{Tuition All Schools 2016}
}
\description{
Tuition All Schools 2016
}
\details{
\url{https://github.com/sigbertklinke/wwwdata/tree/master/wwwdata/dasl}
}
\references{
\url{https://collegescorecard.ed.gov/data/}
}
\concept{Displaying Quantitative Data}
\concept{Summarizing Quantitative Data}
|
5b0f2f4d7ebab613bbe66a1d9dd879f764946621 | 2a674de39135d3111a2df016884039cdf4b485d9 | /plot3.R | 52456f21fee7f819f177471270b56e4634fdf4e2 | [] | no_license | gahpeng/ExData_Plotting1 | a0baebfb0b7801d8c8058fbbe7637ff2af0622e1 | 53a4c59447c5ff3718d3a78bc8c94b672fead4ac | refs/heads/master | 2021-01-15T17:07:28.811666 | 2015-08-07T17:39:30 | 2015-08-07T17:39:30 | 40,346,191 | 0 | 0 | null | 2015-08-07T07:02:04 | 2015-08-07T07:02:04 | null | UTF-8 | R | false | false | 1,144 | r | plot3.R |
setwd("C:/gahpeng/coursera/04_Exploratory Data Analysis/Course Project 1")
#load data to R
datafilename <- "./household_power_consumption.txt"
data <- read.table(datafilename, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
#using data from the dates 2007-02-01 and 2007-02-02
data$newdate<-strptime(data$Date, "%d/%m/%Y")
usedData <- data[data$newdate %in% c(strptime("01/02/2007", "%d/%m/%Y"),strptime("02/02/2007", "%d/%m/%Y")) ,]
#create new colume with date time
usedData$newdatetime<- strptime(paste(usedData$Date, usedData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
#Construct the plot and save it to a PNG file with a width of 480 pixels and a height of 480 pixels
png("plot3.png", width=480, height=480, units="px")
plot(usedData$newdatetime,as.numeric(usedData$Sub_metering_1), type="l", ylab="Energy Sub Metering", xlab="")
lines(usedData$newdatetime, as.numeric(usedData$Sub_metering_2), type="l", col="red")
lines(usedData$newdatetime, as.numeric(usedData$Sub_metering_3), type="l", col="blue")
legend(x="topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, col=c("black", "red", "blue"))
dev.off()
|
687ea95eb21c65bf9043f96baa5c56bde48dabc5 | 08546210198bfcf0a3b4c2718113e94c3cc6c3c3 | /traits/R/OldCode/CN Leaves.R | d381e1d7bf0f326b5a84e1d6986a081eb50b591a | [] | no_license | chencaf/PFTC_1_2_China | bdbbccd9b588abf8f395cf362e0faa5a0d431570 | 1a0002bfb3f324c6c4c998bacbe535ffa647c5f0 | refs/heads/master | 2022-10-15T13:28:14.921764 | 2020-06-09T07:44:59 | 2020-06-09T07:44:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 376 | r | CN Leaves.R | library(readxl)
CNLeafs <- leafarea2015 <- read_csv(file = "CNAnalysis.csv")
Annjeanette <- read_excel(path = "ChinaLeafTraitData_senttogroup.xlsx")
Annjeanette <- Annjeanette %>%
select(Full_Envelope_Name, `stoich vial label`)
ddd <- CNLeafs %>%
left_join(Annjeanette, by = c("Full_Envelope_Name")) %>%
sort(`stoich vial label`)
write_csv(ddd, "CNAnalysis2.csv")
|
2a4eed0e970ed753d9b6f18aa1ab8f01e0d28c5e | 2ef5781f16b0aa68471519dfc26a2491b5a574dc | /Sentiment analysis_AFINN.R | b77ac3e1f77420a3888710dc177c9bbe0720e212 | [] | no_license | bhavanaramesh/Text-Analytics-on-tweets | 88f04949ce2c006d1d7f9ab93b567f76506c6243 | 380b5caefc11f14a852fd3b936d1d7e433479ea9 | refs/heads/master | 2020-06-12T02:40:03.436517 | 2019-06-27T23:02:01 | 2019-06-27T23:02:01 | 194,171,030 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 992 | r | Sentiment analysis_AFINN.R | library(tidyverse)
library(tidytext)
library(glue)
library(stringr)
library(tm)
filepath <- scan("Sentiment_Analysis_corpusdata.csv", what= "character", sep=NULL)
docs <- Corpus(VectorSource(filepath))
inspect(docs)
fileName <- trimws(docs)
tokens <- data_frame(text = fileName) %>% unnest_tokens(word, text) %>% anti_join(stop_words)
#using afinn
tokenss <- tokens %>%
inner_join(get_sentiments("afinn")) %>%
count(word, score, sort = TRUE) %>%
ungroup()
library(ggplot2)
library(igraph)
tokenss %>%
group_by(score) %>%
top_n(10) %>%
ungroup() %>%
mutate(word = reorder(word, n)) %>%
ggplot(aes(word, n, fill = score)) +
geom_col(show.legend = FALSE) +
facet_wrap(~score, scales = "free_y") +
labs(y = "AFINN Sentiment scores for Apple watch series 4",
x = NULL) +
coord_flip() + ggtitle("Twitter Sentiment") + theme(plot.title = element_text(hjust = 1.0)) +
scale_fill_gradient(low="red", high="green")
#write.csv(tokenss, file= "Score_AFINN.csv")
|
1a1b141f4b8d17bee44dfc73fdb85baae61f0f02 | 1e620d83967acb48dfba21436d88bf4697904ba0 | /scripts/05-integr_singlecell_cbps.R | 960c13550cc1429beb0f18f4e5a59f6582dbd71f | [
"MIT"
] | permissive | umr1283/LGA_HSPC_PAPER | e6c32af5fd7303dd93b802b7a2d04704d421b305 | 5ff879dc64d555452d8ee980b242a957f412b6c6 | refs/heads/main | 2023-04-17T15:57:57.025596 | 2022-07-18T15:27:35 | 2022-07-18T15:27:35 | 404,302,229 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 33,178 | r | 05-integr_singlecell_cbps.R | #integr CBPs datasets thanks to hematomap
out<-"outputs/05-integr_singlecell_cbps"
dir.create(out)
source("scripts/utils/new_utils.R")
source("../singlecell/scripts/utils/HTO_utils.R")
library(Seurat)
####QC filtering and Demultiplexing data ####
# remove low quality/outlyers cells (doublet with nGene/RNA hi/lo or percent.mt Hi)
# threshold : if not 2 subpop : median +/- 4* median absolute deviation(mad), else cutoff based on distribution
#cbp4####
# cbp4: 5-sample HTO pool. QC-filter cells, then demultiplex with HTODemux;
# because doublet/negative rates are high, re-demux separately on low- and
# high-RNA-count cells, rescue assignments with sex markers
# (sexBasedHTOAssign), and keep singlets only.
sample<-"cbp4"
umis<- Read10X("~/RUN/Run_539_10x_standard/Output/cellranger_count_cbp4_tri/single_cell_barcode_539_HTO_cbp4b/outs/filtered_feature_bc_matrix/")$`Gene Expression`
cbp4_all <- CreateSeuratObject(counts = umis,project = sample)
# Percent of counts from mitochondrial genes (high values = dying cells)
cbp4_all[["percent.mt"]] <- PercentageFeatureSet(object = cbp4_all, pattern = "^MT-")
VlnPlot(object = cbp4_all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"))
#take the 4 four median absolute deviations above the median
p1<-VlnPlot(object = cbp4_all, features ="percent.mt")+geom_hline(yintercept = median(cbp4_all$percent.mt)+ 4*mad(cbp4_all$percent.mt) )
p2<-VlnPlot(object = cbp4_all, features ="nCount_RNA")+geom_hline(yintercept = 60000)
p3<-VlnPlot(object = cbp4_all, features ="nFeature_RNA")+geom_hline(yintercept = 7000 )
p1|p2|p3+plot_layout(guides = "collect")
ggsave(fp(out,"cbp4_QC_cells_metrics.png"))
# Keep cells below the mito threshold and below fixed count/feature cutoffs
cbp4_qc<-subset(cbp4_all,percent.mt<median(cbp4_all$percent.mt)+ 4*mad(cbp4_all$percent.mt)&nCount_RNA<60000&nFeature_RNA<7000)
#reassign samples
cbp4.htos<-as.matrix(Read10X("~/RUN/Run_539_10x_standard/Output/cellranger_count_cbp4_tri/single_cell_barcode_539_HTO_cbp4b/outs/filtered_feature_bc_matrix/")$`Antibody Capture`)
# HTO barcode -> donor ID mapping (order must match the Antibody Capture rows)
rownames(cbp4.htos)<-c("ctrlM555",
"ctrlM518",
"ctrlM537",
"lgaF551",
"lgaF543")
cbp4_qc[["HTO"]] <- CreateAssayObject(counts = cbp4.htos[,colnames(cbp4_qc)])
# Normalize HTO data, here we use centered log-ratio (CLR) transformation
cbp4_qc <- NormalizeData(cbp4_qc, assay = "HTO", normalization.method = "CLR")
cbp4_qc <- HTODemux(cbp4_qc, assay = "HTO",positive.quantile = 0.95)
table(cbp4_qc$HTO_classification.global)
# Doublet Negative Singlet
# 1017 2373 2487
#sex based recovery
# checkHTOSex cross-checks HTO calls against sex-specific genes
# (RPS4Y1 = male, XIST = female)
cbp4_qc<-checkHTOSex(cbp4_qc,gene_male="RPS4Y1",gene_female="XIST")
# calculating pct of real singlet male/female cells expressing sex marker ( save in 'misc' of 'HTO' assay):
# for male : 82 % express the male gene
# for female : 97% express the female gene
#as ++ doublet/singlet, and only 82% male cells express RPS4y1, split in 2 based hi / lo RNA count
VlnPlot(object = cbp4_all, features ="nCount_RNA")+geom_hline(yintercept = 8000)
# Low-RNA-count fraction: re-run HTODemux with a stricter positive.quantile
cbp4_qc_lo<-subset(cbp4_qc,nCount_RNA<8000)
cbp4_qc_lo <- NormalizeData(cbp4_qc_lo, assay = "HTO", normalization.method = "CLR")
cbp4_qc_lo <- HTODemux(cbp4_qc_lo, assay = "HTO",positive.quantile = 0.9999)
table(cbp4_qc_lo$HTO_classification.global)
# Doublet Negative Singlet
# 323 605 739
cbp4_qc_lo<-checkHTOSex(cbp4_qc_lo,gene_male="RPS4Y1",gene_female="XIST")
# for male : 52 % express the male gene
# for female : 85 % express the female gene
# sexBasedHTOAssign reconciles HTO call with detected sex (project helper)
cbp4_qc_lo<-sexBasedHTOAssign(cbp4_qc_lo)
cbp4_qc_lo_s<-subset(cbp4_qc_lo,new.HTO_classif.global=="Singlet")
# High-RNA-count fraction: same procedure with a looser quantile
cbp4_qc_hi<-subset(cbp4_qc,nCount_RNA>=8000)
cbp4_qc_hi <- NormalizeData(cbp4_qc_hi, assay = "HTO", normalization.method = "CLR")
cbp4_qc_hi <- HTODemux(cbp4_qc_hi, assay = "HTO",positive.quantile = 0.95)
table(cbp4_qc_hi$HTO_classification.global)
# Doublet Negative Singlet
# 880 1511 1819
cbp4_qc_hi<-checkHTOSex(cbp4_qc_hi,gene_male="RPS4Y1",gene_female="XIST")
# for male : 100 % express the male gene
# for female : 100 % express the female gene
cbp4_qc_hi<-sexBasedHTOAssign(cbp4_qc_hi)
# Bad_HTO_assign Doublet Negative Singlet
# 112 1201 446 2451
cbp4_qc_hi_s<-subset(cbp4_qc_hi,new.HTO_classif.global=="Singlet")
#merge the 2
cbp4_qc_s<-merge(cbp4_qc_hi_s,cbp4_qc_lo_s)
cbp4_qc_s$sample<-cbp4_qc_s$new.ID
table(cbp4_qc_s$sample)
# ctrlM518 ctrlM537 ctrlM555 lgaF543 lgaF551
# 678 470 619 1013 745
saveRDS(cbp4_qc_s,fp(out,"cbp4.rds"))
#cbp2####
# cbp2: 6-sample HTO pool. HTO demux performs poorly here (see tables below),
# so after an HTO + sex-marker pass the final assignment is done by clustering
# cells on an imputed SNP genotype matrix and labelling clusters by donor.
sample<-"cbp2"
umis<- Read10X("~/RUN/Run_539_10x_standard/Output/cellranger_count_cbp2b_tri/single_cell_barcode_539_HTO_cbp2b/outs/filtered_feature_bc_matrix/")$`Gene Expression`
cbp2_all <- CreateSeuratObject(counts = umis,project = sample)
cbp2_all[["percent.mt"]] <- PercentageFeatureSet(object = cbp2_all, pattern = "^MT-")
VlnPlot(object = cbp2_all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"))
#take the 4 four median absolute deviations above the median
p1<-VlnPlot(object = cbp2_all, features ="percent.mt")+geom_hline(yintercept = median(cbp2_all$percent.mt)+ 4*mad(cbp2_all$percent.mt) )
p2<-VlnPlot(object = cbp2_all, features ="nCount_RNA")+geom_hline(yintercept = 60000)
p3<-VlnPlot(object = cbp2_all, features ="nFeature_RNA")+geom_hline(yintercept = 6500 )
p1|p2|p3+plot_layout(guides = "collect")
ggsave(fp(out,"cbp2_QC_cells_metrics.png"))
cbp2_qc<-subset(cbp2_all,percent.mt<median(cbp2_all$percent.mt)+ 4*mad(cbp2_all$percent.mt)&nCount_RNA<60000&nFeature_RNA<6500)
#reassign samples
cbp2.htos<-as.matrix(Read10X("~/RUN/Run_539_10x_standard/Output/cellranger_count_cbp2b_tri/single_cell_barcode_539_HTO_cbp2b/outs/filtered_feature_bc_matrix/")$`Antibody Capture`)
# HTO barcode -> donor ID mapping (order must match the Antibody Capture rows)
rownames(cbp2.htos)<-c("ctrlF528",
"ctrlM539",
"iugrM553",
"iugrM558",
"lgaM549",
"lgaF532")
cbp2_qc[["HTO"]] <- CreateAssayObject(counts = cbp2.htos[,colnames(cbp2_qc)])
cbp2_qc <- NormalizeData(cbp2_qc, assay = "HTO", normalization.method = "CLR")
cbp2_qc <- HTODemux(cbp2_qc, assay = "HTO",positive.quantile = 0.9999)
table(cbp2_qc$HTO_classification.global)
# Doublet Negative Singlet
# 9536 1143 98
#as ++ doublet split in 2 based hi / lo RNA count
VlnPlot(object = cbp2_qc, features ="nCount_RNA",group.by = "orig.ident")+geom_hline(yintercept = 6000)
cbp2_qc_lo<-subset(cbp2_qc,nCount_RNA<6000)
cbp2_qc_lo <- NormalizeData(cbp2_qc_lo, assay = "HTO", normalization.method = "CLR")
cbp2_qc_lo <- HTODemux(cbp2_qc_lo, assay = "HTO",positive.quantile = 0.95)
table(cbp2_qc_lo$HTO_classification.global)
# Doublet Negative Singlet
# 1749 1002 49
cbp2_qc_lo<-checkHTOSex(cbp2_qc_lo,gene_male="RPS4Y1",gene_female="XIST")
# for male : 52 % express the male gene
# for female : 36 % express the female gene
cbp2_qc_hi<-subset(cbp2_qc,nCount_RNA>=6000)
cbp2_qc_hi <- NormalizeData(cbp2_qc_hi, assay = "HTO", normalization.method = "CLR")
cbp2_qc_hi <- HTODemux(cbp2_qc_hi, assay = "HTO",positive.quantile = 0.99)
table(cbp2_qc_hi$HTO_classification.global)
# Doublet Negative Singlet
# 1323 1844 4810
cbp2_qc_hi<-checkHTOSex(cbp2_qc_hi,gene_male="RPS4Y1",gene_female="XIST")
# for male : 99 % express the male gene
# for female : 98 % express the female gene
cbp2_qc_hi<-sexBasedHTOAssign(cbp2_qc_hi)
# Bad_HTO_assign Doublet Negative Singlet
# 84 1596 503 5794
#merge the 2
cbp2_qc<-merge(cbp2_qc_hi,cbp2_qc_lo)
#demultiplex on SNP info
# Load barcode x SNP genotype matrix produced by the lineage-tracing pipeline
mat_gt<-fread("../lineage_tracing/outputs/cbp2/25pct_det_and_variables_snps_barcodes_genotype_matrices_imputed.tsv")
mat_gt<-as.matrix(data.frame(mat_gt,row.names = "snp"))
dim(mat_gt)
# fread/data.frame mangle "-1" barcode suffixes into ".1"; restore the dash
colnames(mat_gt)<-str_replace(colnames(mat_gt),"\\.","-")
sum(colnames(cbp2_qc)%in%colnames(mat_gt)) #8292
# Keep only cells with genotype data and cluster them on the SNP assay
cbp2_qc_snp<-cbp2_qc[,colnames(cbp2_qc)%in%colnames(mat_gt)]
cbp2_qc_snp[["SNP"]]<-CreateAssayObject(data=mat_gt[,colnames(cbp2_qc_snp)])
DefaultAssay(cbp2_qc_snp)<-"SNP"
cbp2_qc_snp<-FindVariableFeatures(cbp2_qc_snp)
cbp2_qc_snp<-ScaleData(cbp2_qc_snp)
cbp2_qc_snp<-RunPCA(cbp2_qc_snp)
cbp2_qc_snp<-RunUMAP(cbp2_qc_snp,dims = 1:10)
cbp2_qc_snp<-FindNeighbors(cbp2_qc_snp,dims = 1:10)
cbp2_qc_snp<-FindClusters(cbp2_qc_snp,resolution = 0.5)
DimPlot(cbp2_qc_snp,label=T,group.by = c("seurat_clusters","new.ID"))
# Manual cluster -> donor assignment, one entry per SNP cluster (0..13),
# chosen by inspecting the DimPlot above; mixed clusters are "Doublet"
new.idents<-c("lgaM549",
"iugrM558",
"ctrlM539",
"lgaF532",
"iugrM553",
"lgaM549",
"ctrlF528",
"Doublet",
"ctrlM539",
"iugrM558",
"Doublet",
"Doublet",
"Doublet",
"Doublet"
)
names(new.idents)<-levels(cbp2_qc_snp)
cbp2_qc_snp<-RenameIdents(cbp2_qc_snp,new.idents)
DimPlot(cbp2_qc_snp,label = T)
cbp2_qc_snp[["snp.ID"]]<-Idents(cbp2_qc_snp)
cbp2_qc_s<-subset(cbp2_qc_snp,snp.ID!="Doublet")
cbp2_qc_s$sample<-cbp2_qc_s$snp.ID
table(cbp2_qc_s$sample)
# lgaM549 iugrM558 ctrlM539 lgaF532 iugrM553 ctrlF528
# 2459 1381 1358 929 768 692
saveRDS(cbp2_qc_s,fp(out,"cbp2.rds"))
#cbp3####
# cbp3: 3-sample HTO pool. HTO counts come from a separate CITE-seq-Count
# style matrix (umi_count) rather than the cellranger Antibody Capture assay,
# so barcodes need the "-1" suffix added before matching.
sample<-"cbp3"
umis<- Read10X("../singlecell/datasets/cbp3/filtered_feature_bc_matrix/")
cbp3_all <- CreateSeuratObject(counts = umis,project = sample)
cbp3_all[["percent.mt"]] <- PercentageFeatureSet(object = cbp3_all, pattern = "^MT-")
VlnPlot(object = cbp3_all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"))
#take the 4 four median absolute deviations above the median
p1<-VlnPlot(object = cbp3_all, features ="percent.mt")+geom_hline(yintercept = median(cbp3_all$percent.mt)+ 4*mad(cbp3_all$percent.mt) )
p2<-VlnPlot(object = cbp3_all, features ="nCount_RNA")+geom_hline(yintercept = 60000)
p3<-VlnPlot(object = cbp3_all, features ="nFeature_RNA")+geom_hline(yintercept = 6500 )
p1|p2|p3+plot_layout(guides = "collect")
ggsave(fp(out,"cbp3_QC_cells_metrics.png"))
cbp3_qc<-subset(cbp3_all,percent.mt<median(cbp3_all$percent.mt)+ 4*mad(cbp3_all$percent.mt)&nCount_RNA<60000&nFeature_RNA<6500)
#reassign samples
# Row 4 of the umi_count matrix is dropped ([1:3,]); presumably an
# "unmapped" row — TODO confirm against the HTO counting pipeline output
cbp3.htos<-as.matrix(Read10X("../singlecell/datasets/cbp3/HTO_CBP3/umi_count/",gene.column = 1))[1:3,]
rownames(cbp3.htos)<-c("ctrlF523",
"iugrF524",
"lgaF559")
colnames(cbp3.htos)<-paste0(colnames(cbp3.htos),"-1")
sum(colnames(cbp3_qc)%in%colnames(cbp3.htos)) #2570/2590
common_bc<-intersect(colnames(cbp3_qc),colnames(cbp3.htos))
cbp3_qc<-cbp3_qc[,common_bc]
cbp3_qc[["HTO"]] <- CreateAssayObject(counts = cbp3.htos[,common_bc])
# Normalize HTO data, here we use centered log-ratio (CLR) transformation
cbp3_qc <- NormalizeData(cbp3_qc, assay = "HTO", normalization.method = "CLR")
cbp3_qc <- HTODemux(cbp3_qc, assay = "HTO",positive.quantile = 0.98)
table(cbp3_qc$HTO_classification.global)
# ++doublets: need to split in 2 (low / high nFeature) and re-demux each half
VlnPlot(object = cbp3_all, features ="nFeature_RNA")+geom_hline(yintercept = 800 )
cbp3_qc_lo<-subset(cbp3_qc,nFeature_RNA<800)
cbp3_qc_lo <- NormalizeData(cbp3_qc_lo, assay = "HTO", normalization.method = "CLR")
cbp3_qc_lo <- HTODemux(cbp3_qc_lo, assay = "HTO",positive.quantile = 0.95)
table(cbp3_qc_lo$HTO_classification.global)
# Doublet Negative Singlet
# 321 411 108
cbp3_qc_hi<-subset(cbp3_qc,nFeature_RNA>=800)
cbp3_qc_hi <- NormalizeData(cbp3_qc_hi, assay = "HTO", normalization.method = "CLR")
cbp3_qc_hi <- HTODemux(cbp3_qc_hi, assay = "HTO",positive.quantile = 0.98)
table(cbp3_qc_hi$HTO_classification.global)
# Doublet Negative Singlet
# 278 723 729
cbp3_qc<-merge(cbp3_qc_hi,cbp3_qc_lo)
cbp3_qc_s<-subset(cbp3_qc,HTO_classification.global=="Singlet")
cbp3_qc_s$sample<-cbp3_qc_s$hash.ID
table(cbp3_qc_s$sample)
# ctrlF523 iugrF524 lgaF559
# 206 368 263
saveRDS(cbp3_qc_s,fp(out,"cbp3.rds"))
#cbp8####
# cbp8: 4-sample HTO pool. Straightforward case — a single HTODemux pass
# gives acceptable singlet recovery, no hi/lo split or sex rescue needed.
sample<-"cbp8"
umis<- Read10X("~/RUN/Run_554_10x_standard/Output/cellranger_count_hto/single_cell_barcode_run_554_cbp8/outs/filtered_feature_bc_matrix/")$`Gene Expression`
cbp8_all <- CreateSeuratObject(counts = umis,project = sample)
cbp8_all[["percent.mt"]] <- PercentageFeatureSet(object = cbp8_all, pattern = "^MT-")
VlnPlot(object = cbp8_all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"))
#take the 4 four median absolute deviations above the median
p1<-VlnPlot(object = cbp8_all, features ="percent.mt")+geom_hline(yintercept = median(cbp8_all$percent.mt)+ 4*mad(cbp8_all$percent.mt) )
p2<-VlnPlot(object = cbp8_all, features ="nCount_RNA")+geom_hline(yintercept = 40000)
p3<-VlnPlot(object = cbp8_all, features ="nFeature_RNA")+geom_hline(yintercept = 6000 )
p1|p2|p3+plot_layout(guides = "collect")
ggsave(fp(out,"cbp8_QC_cells_metrics.png"))
cbp8_qc<-subset(cbp8_all,percent.mt<median(cbp8_all$percent.mt)+ 4*mad(cbp8_all$percent.mt)&nCount_RNA<40000&nFeature_RNA<6000)
#reassign samples
cbp8.htos<-as.matrix(Read10X("~/RUN/Run_554_10x_standard/Output/cellranger_count_hto/single_cell_barcode_run_554_cbp8/outs/filtered_feature_bc_matrix/")$`Antibody Capture`)
# HTO barcode -> donor ID mapping (order must match the Antibody Capture rows)
rownames(cbp8.htos)<-c("ctrlM503",
"ctrlM530",
"lgaM556",
"lgaM496")
cbp8_qc[["HTO"]] <- CreateAssayObject(counts = cbp8.htos[,colnames(cbp8_qc)])
# Normalize HTO data, here we use centered log-ratio (CLR) transformation
cbp8_qc <- NormalizeData(cbp8_qc, assay = "HTO", normalization.method = "CLR")
cbp8_qc <- HTODemux(cbp8_qc, assay = "HTO",positive.quantile = 0.97)
table(cbp8_qc$HTO_classification.global)
# Doublet Negative Singlet
# 843 1367 3652
cbp8_qc_s<-subset(cbp8_qc,HTO_classification.global=="Singlet")
cbp8_qc_s$sample<-cbp8_qc_s$hash.ID
saveRDS(cbp8_qc_s,fp(out,"cbp8.rds"))
#cbp0_ctrl####
sample<-"cbp0_ctrl"
umis<- as.matrix(data.frame(fread("../singlecell/datasets/cbp0/CBP547.csv"),row.names=1))
dim(umis) #33538 6965
head(rownames(umis))
gene_trad<-TransEnsembltoSymbol(rownames(umis))
umis_f<-umis[gene_trad[hgnc_symbol!=""]$ensembl_gene_id,]
rownames(umis_f)<-gene_trad[hgnc_symbol!=""]$hgnc_symbol
sum(duplicated(rownames(umis_f))) #8
cbp0_ctrl_all <- CreateSeuratObject(counts = umis_f,project = sample)
cbp0_ctrl_all[["percent.mt"]] <- PercentageFeatureSet(object = cbp0_ctrl_all, pattern = "^MT-")
VlnPlot(object = cbp0_ctrl_all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"))
#take the 4 four median absolute deviations above the median
p1<-VlnPlot(object = cbp0_ctrl_all, features ="percent.mt")+
geom_hline(yintercept = median(cbp0_ctrl_all$percent.mt)+ 4*mad(cbp0_ctrl_all$percent.mt) )
p2<-VlnPlot(object = cbp0_ctrl_all, features ="nCount_RNA")+
geom_hline(yintercept = median(cbp0_ctrl_all$nCount_RNA)-2*mad(cbp0_ctrl_all$nCount_RNA))+
geom_hline(yintercept = 14000)
p3<-VlnPlot(object = cbp0_ctrl_all, features ="nFeature_RNA")+
geom_hline(yintercept = median(cbp0_ctrl_all$nFeature_RNA)-2*mad(cbp0_ctrl_all$nFeature_RNA) )+
geom_hline(yintercept = 2900)
p1|p2|p3+plot_layout(guides = "collect")
ggsave(fp(out,"cbp0_ctrl_QC_cells_metrics.png"))
cbp0_ctrl_qc<-subset(cbp0_ctrl_all,percent.mt<median(cbp0_ctrl_all$percent.mt)+ 4*mad(cbp0_ctrl_all$percent.mt)&
nCount_RNA>median(cbp0_ctrl_all$nCount_RNA)-2*mad(cbp0_ctrl_all$nCount_RNA)&nCount_RNA<14000&
nFeature_RNA>median(cbp0_ctrl_all$nFeature_RNA)-2*mad(cbp0_ctrl_all$nFeature_RNA)&nFeature_RNA<2900)
cbp0_ctrl_qc #6478 cells
cbp0_ctrl_qc$sample<-"ctrlF547"
saveRDS(cbp0_ctrl_qc,fp(out,"cbp0_ctrl.rds"))
#cbp0_lga####
# cbp0_lga: single-donor sample (lgaF552), same CSV-based pipeline as
# cbp0_ctrl. NOTE: reuses `gene_trad` computed in the cbp0_ctrl section,
# so that section must run first.
sample<-"cbp0_lga"
umis<- as.matrix(data.frame(fread("../singlecell/datasets/cbp0/CBP552.csv"),row.names=1))
dim(umis) #33538 3020
umis_f<-umis[gene_trad[hgnc_symbol!=""]$ensembl_gene_id,]
rownames(umis_f)<-gene_trad[hgnc_symbol!=""]$hgnc_symbol
sum(duplicated(rownames(umis_f))) #8
cbp0_lga_all <- CreateSeuratObject(counts = umis_f,project = sample)
cbp0_lga_all[["percent.mt"]] <- PercentageFeatureSet(object = cbp0_lga_all, pattern = "^MT-")
VlnPlot(object = cbp0_lga_all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"))
#take the 4 four median absolute deviations above the median
p1<-VlnPlot(object = cbp0_lga_all, features ="percent.mt")+geom_hline(yintercept = median(cbp0_lga_all$percent.mt)+ 4*mad(cbp0_lga_all$percent.mt) )
p2<-VlnPlot(object = cbp0_lga_all, features ="nCount_RNA")+geom_hline(yintercept = median(cbp0_lga_all$nCount_RNA)-2*mad(cbp0_lga_all$nCount_RNA))+geom_hline(yintercept = 20000)
p3<-VlnPlot(object = cbp0_lga_all, features ="nFeature_RNA")+geom_hline(yintercept = median(cbp0_lga_all$nFeature_RNA)-2*mad(cbp0_lga_all$nFeature_RNA) )+geom_hline(yintercept = 4000)
p1|p2|p3+plot_layout(guides = "collect")
ggsave(fp(out,"cbp0_lga_QC_cells_metrics.png"))
# Keep cells inside both lower (median - 2 MADs) and upper (fixed) bounds
cbp0_lga_qc<-subset(cbp0_lga_all,percent.mt<median(cbp0_lga_all$percent.mt)+ 4*mad(cbp0_lga_all$percent.mt)&nCount_RNA>median(cbp0_lga_all$nCount_RNA)-2*mad(cbp0_lga_all$nCount_RNA)&nCount_RNA<20000&nFeature_RNA>median(cbp0_lga_all$nFeature_RNA)-2*mad(cbp0_lga_all$nFeature_RNA)&nFeature_RNA<4000)
cbp0_lga_qc #2897 cells
cbp0_lga_qc$sample<-"lgaF552"
saveRDS(cbp0_lga_qc,fp(out,"cbp0_lga.rds"))
#cbp6a####
# cbp6a: 2-sample pool (ctrlF544 female + lgaM556 male) with no HTOs.
# Demultiplexed purely by sex-specific gene expression:
# XIST (female-specific) vs RPS4Y1 (Y-linked, male-specific).
# Cells expressing both are called doublets; cells expressing neither
# are "Negative" and discarded.
sample<-"cbp6a"
umis<- Read10X("~/RUN/Run_538_10x_standard/Output/cellranger_count/run_538_10x-cbp6-a/outs/filtered_feature_bc_matrix/")
cbp6a_all <- CreateSeuratObject(counts = umis,project = sample)
cbp6a_all[["percent.mt"]] <- PercentageFeatureSet(object = cbp6a_all, pattern = "^MT-")
VlnPlot(object = cbp6a_all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"))
#take the 4 four median absolute deviations above the median
# QC thresholds; thr_ge/thr_rn are computed for inspection but the subset
# below uses fixed bounds (220 / 5000 features, 30000 counts) instead
thr_mt<-median(cbp6a_all$percent.mt)+ 4*mad(cbp6a_all$percent.mt)
thr_ge<-median(cbp6a_all$nFeature_RNA)- 2*mad(cbp6a_all$nFeature_RNA)
thr_rn<-median(cbp6a_all$nCount_RNA)-2*mad(cbp6a_all$nCount_RNA)
min(cbp6a_all$nCount_RNA)
p1<-VlnPlot(object = cbp6a_all, features ="percent.mt")+geom_hline(yintercept = thr_mt )
p2<-VlnPlot(object = cbp6a_all, features ="nCount_RNA")+geom_hline(yintercept = 30000)
p3<-VlnPlot(object = cbp6a_all, features ="nFeature_RNA")+geom_hline(yintercept = 220 )+geom_hline(yintercept = 5000 )
p1|p2|p3+plot_layout(guides = "collect")
ggsave(fp(out,"cbp6a_QC_cells_metrics.png"))
# Use the precomputed thr_mt instead of re-deriving median + 4*mad inline
# (identical value; consistent with the cbp6b section below)
cbp6a_qc<-subset(cbp6a_all,percent.mt<thr_mt&nCount_RNA<30000&nFeature_RNA>220&nFeature_RNA<5000)
#reassign samples
VlnPlot(cbp6a_qc,c("XIST","RPS4Y1"),group.by = "orig.ident")
sum(cbp6a_qc@assays$RNA@data["XIST",]>0)
sum(cbp6a_qc@assays$RNA@data["RPS4Y1",]>0)
# Sex-based assignment: both markers -> Doublet, XIST only -> female donor,
# RPS4Y1 only -> male donor, neither -> Negative (unassignable)
cbp6a_qc@meta.data[cbp6a_qc@assays$RNA@data["XIST",]>0&
cbp6a_qc@assays$RNA@data["RPS4Y1",]>0,"sex.ID"]<-"Doublet"
cbp6a_qc@meta.data[cbp6a_qc@assays$RNA@data["XIST",]>0&
cbp6a_qc@assays$RNA@data["RPS4Y1",]==0,"sex.ID"]<-"ctrlF544"
cbp6a_qc@meta.data[cbp6a_qc@assays$RNA@data["XIST",]==0&
cbp6a_qc@assays$RNA@data["RPS4Y1",]>0,"sex.ID"]<-"lgaM556"
cbp6a_qc@meta.data[cbp6a_qc@assays$RNA@data["XIST",]==0&
cbp6a_qc@assays$RNA@data["RPS4Y1",]==0,"sex.ID"]<-"Negative"
table(cbp6a_qc@meta.data$sex.ID)
# ctrlF544 Doublet lgaM556 Negative
# 2098 456 3596 2435
cbp6a_qc_s<-subset(cbp6a_qc,sex.ID!="Doublet"&sex.ID!="Negative")
cbp6a_qc_s$sample<-cbp6a_qc_s$sex.ID
table(cbp6a_qc_s$sample)
# ctrlF544 lgaM556
# 2098 3596
saveRDS(cbp6a_qc_s,fp(out,"cbp6a.rds"))
#cbp6b####
# cbp6b: 2-sample pool (ctrlF545 female + lgaM533 male), demultiplexed by
# sex markers like cbp6a. Here the MAD-based lower feature bound (thr_ge)
# IS used in the subset, unlike cbp6a.
sample<-"cbp6b"
umis<- Read10X("~/RUN/Run_538_10x_standard/Output/cellranger_count/run_538_10x-cbp6-b/outs/filtered_feature_bc_matrix/")
cbp6b_all <- CreateSeuratObject(counts = umis,project = sample)
cbp6b_all[["percent.mt"]] <- PercentageFeatureSet(object = cbp6b_all, pattern = "^MT-")
VlnPlot(object = cbp6b_all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"))
#take the 4 four median absolute deviations above the median
thr_mt<-median(cbp6b_all$percent.mt)+ 4*mad(cbp6b_all$percent.mt)
thr_ge<-median(cbp6b_all$nFeature_RNA)- 2*mad(cbp6b_all$nFeature_RNA)
thr_rn<-median(cbp6b_all$nCount_RNA)-2*mad(cbp6b_all$nCount_RNA)
p1<-VlnPlot(object = cbp6b_all, features ="percent.mt")+geom_hline(yintercept = thr_mt )
p2<-VlnPlot(object = cbp6b_all, features ="nCount_RNA")+geom_hline(yintercept = 30000)
p3<-VlnPlot(object = cbp6b_all, features ="nFeature_RNA")+geom_hline(yintercept = thr_ge )+geom_hline(yintercept = 5000 )
p1|p2|p3+plot_layout(guides = "collect")
ggsave(fp(out,"cbp6b_QC_cells_metrics.png"))
cbp6b_qc<-subset(cbp6b_all,percent.mt< thr_mt&nCount_RNA<30000&nFeature_RNA>thr_ge&nFeature_RNA<5000)
#reassign samples
VlnPlot(cbp6b_qc,c("XIST","RPS4Y1"),group.by = "orig.ident")
sum(cbp6b_qc@assays$RNA@data["XIST",]>0)
sum(cbp6b_qc@assays$RNA@data["RPS4Y1",]>0)
# Sex-based assignment: both markers -> Doublet, XIST only -> female donor,
# RPS4Y1 only -> male donor, neither -> Negative
cbp6b_qc@meta.data[cbp6b_qc@assays$RNA@data["XIST",]>0&
cbp6b_qc@assays$RNA@data["RPS4Y1",]>0,"sex.ID"]<-"Doublet"
cbp6b_qc@meta.data[cbp6b_qc@assays$RNA@data["XIST",]>0&
cbp6b_qc@assays$RNA@data["RPS4Y1",]==0,"sex.ID"]<-"ctrlF545"
cbp6b_qc@meta.data[cbp6b_qc@assays$RNA@data["XIST",]==0&
cbp6b_qc@assays$RNA@data["RPS4Y1",]>0,"sex.ID"]<-"lgaM533"
cbp6b_qc@meta.data[cbp6b_qc@assays$RNA@data["XIST",]==0&
cbp6b_qc@assays$RNA@data["RPS4Y1",]==0,"sex.ID"]<-"Negative"
table(cbp6b_qc@meta.data$sex.ID)
# ctrlF545 Doublet lgaM533 Negative
# 875 66 2437 363
cbp6b_qc_s<-subset(cbp6b_qc,sex.ID!="Doublet"&sex.ID!="Negative")
cbp6b_qc_s$sample<-cbp6b_qc_s$sex.ID
table(cbp6b_qc_s$sample)
# ctrlF545 lgaM533
# 875 2437
saveRDS(cbp6b_qc_s,fp(out,"cbp6b.rds"))
#cbp6c####
# cbp6c: 2-sample pool (ctrlF541 female + lgaM526 male), sex-marker demux.
# thr_ge/thr_rn are computed but not used in the subset below (no lower
# bound applied) — presumably a deliberate per-sample choice; TODO confirm.
sample<-"cbp6c"
umis<- Read10X("~/RUN/Run_538_10x_standard/Output/cellranger_count/run_538_10x-cbp6-c/outs/filtered_feature_bc_matrix/")
cbp6c_all <- CreateSeuratObject(counts = umis,project = sample)
cbp6c_all[["percent.mt"]] <- PercentageFeatureSet(object = cbp6c_all, pattern = "^MT-")
VlnPlot(object = cbp6c_all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"))
#take the 4 four median absolute deviations above the median
thr_mt<-median(cbp6c_all$percent.mt)+ 4*mad(cbp6c_all$percent.mt)
thr_ge<-median(cbp6c_all$nFeature_RNA)- 2*mad(cbp6c_all$nFeature_RNA)
thr_rn<-median(cbp6c_all$nCount_RNA)-2*mad(cbp6c_all$nCount_RNA)
p1<-VlnPlot(object = cbp6c_all, features ="percent.mt")+geom_hline(yintercept = thr_mt )
p2<-VlnPlot(object = cbp6c_all, features ="nCount_RNA")+geom_hline(yintercept = 30000)
p3<-VlnPlot(object = cbp6c_all, features ="nFeature_RNA")+geom_hline(yintercept = 5000 )
p1|p2|p3+plot_layout(guides = "collect")
ggsave(fp(out,"cbp6c_QC_cells_metrics.png"))
cbp6c_qc<-subset(cbp6c_all,percent.mt< thr_mt&nCount_RNA<30000&nFeature_RNA<5000)
#reassign samples
VlnPlot(cbp6c_qc,c("XIST","RPS4Y1"),group.by = "orig.ident")
sum(cbp6c_qc@assays$RNA@data["XIST",]>0)
sum(cbp6c_qc@assays$RNA@data["RPS4Y1",]>0)
# Sex-based assignment: both markers -> Doublet, XIST only -> female donor,
# RPS4Y1 only -> male donor, neither -> Negative
cbp6c_qc@meta.data[cbp6c_qc@assays$RNA@data["XIST",]>0&
cbp6c_qc@assays$RNA@data["RPS4Y1",]>0,"sex.ID"]<-"Doublet"
cbp6c_qc@meta.data[cbp6c_qc@assays$RNA@data["XIST",]>0&
cbp6c_qc@assays$RNA@data["RPS4Y1",]==0,"sex.ID"]<-"ctrlF541"
cbp6c_qc@meta.data[cbp6c_qc@assays$RNA@data["XIST",]==0&
cbp6c_qc@assays$RNA@data["RPS4Y1",]>0,"sex.ID"]<-"lgaM526"
cbp6c_qc@meta.data[cbp6c_qc@assays$RNA@data["XIST",]==0&
cbp6c_qc@assays$RNA@data["RPS4Y1",]==0,"sex.ID"]<-"Negative"
table(cbp6c_qc@meta.data$sex.ID)
# ctrlF541 Doublet lgaM526 Negative
# 2346 114 1550 426
cbp6c_qc_s<-subset(cbp6c_qc,sex.ID!="Doublet"&sex.ID!="Negative")
cbp6c_qc_s$sample<-cbp6c_qc_s$sex.ID
table(cbp6c_qc_s$sample)
# ctrlF541 lgaM526
# 2346 1550
saveRDS(cbp6c_qc_s,fp(out,"cbp6c.rds"))
#cbp7a####
# cbp7a: 2-sample pool (ctrlM555 male + lgaF554 female), sex-marker demux.
# Note the sex/donor orientation is reversed vs cbp6*: here the female
# marker (XIST) maps to the LGA donor.
sample<-"cbp7a"
umis<- Read10X("~/RUN/Run_554_10x_standard/Output/cellranger_count/single_cell_barcode_run_554_10xcbp7-a/outs/filtered_feature_bc_matrix/")
cbp7a_all <- CreateSeuratObject(counts = umis,project = sample)
cbp7a_all[["percent.mt"]] <- PercentageFeatureSet(object = cbp7a_all, pattern = "^MT-")
VlnPlot(object = cbp7a_all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"))
#take the 4 four median absolute deviations above the median
thr_mt<-median(cbp7a_all$percent.mt)+ 4*mad(cbp7a_all$percent.mt)
thr_ge<-median(cbp7a_all$nFeature_RNA)- 2*mad(cbp7a_all$nFeature_RNA)
thr_rn<-median(cbp7a_all$nCount_RNA)-2*mad(cbp7a_all$nCount_RNA)
p1<-VlnPlot(object = cbp7a_all, features ="percent.mt")+geom_hline(yintercept = thr_mt )
p2<-VlnPlot(object = cbp7a_all, features ="nCount_RNA")+geom_hline(yintercept = 35000)
p3<-VlnPlot(object = cbp7a_all, features ="nFeature_RNA")+geom_hline(yintercept = 6000 )
p1|p2|p3+plot_layout(guides = "collect")
ggsave(fp(out,"cbp7a_QC_cells_metrics.png"))
cbp7a_qc<-subset(cbp7a_all,percent.mt<thr_mt&nCount_RNA<35000&nFeature_RNA<6000)
#reassign samples
VlnPlot(cbp7a_qc,c("XIST","RPS4Y1"),group.by = "orig.ident")
sum(cbp7a_qc@assays$RNA@data["XIST",]>0)
sum(cbp7a_qc@assays$RNA@data["RPS4Y1",]>0)
sum(cbp7a_qc@assays$RNA@data["XIST",]>0&cbp7a_qc@assays$RNA@data["RPS4Y1",]>0) #408 doublet
# Sex-based assignment: both markers -> Doublet, XIST only -> lgaF554,
# RPS4Y1 only -> ctrlM555, neither -> Negative
cbp7a_qc@meta.data[cbp7a_qc@assays$RNA@data["XIST",]>0&
cbp7a_qc@assays$RNA@data["RPS4Y1",]>0,"sex.ID"]<-"Doublet"
cbp7a_qc@meta.data[cbp7a_qc@assays$RNA@data["XIST",]>0&
cbp7a_qc@assays$RNA@data["RPS4Y1",]==0,"sex.ID"]<-"lgaF554"
cbp7a_qc@meta.data[cbp7a_qc@assays$RNA@data["XIST",]==0&
cbp7a_qc@assays$RNA@data["RPS4Y1",]>0,"sex.ID"]<-"ctrlM555"
cbp7a_qc@meta.data[cbp7a_qc@assays$RNA@data["XIST",]==0&
cbp7a_qc@assays$RNA@data["RPS4Y1",]==0,"sex.ID"]<-"Negative"
table(cbp7a_qc@meta.data$sex.ID)
# ctrlM555 Doublet lgaF554 Negative
# 1990 408 2800 573
cbp7a_qc_s<-subset(cbp7a_qc,sex.ID!="Doublet"&sex.ID!="Negative")
cbp7a_qc_s$sample<-cbp7a_qc_s$sex.ID
table(cbp7a_qc_s$sample)
# ctrlM555 lgaF554
# 1990 2800
saveRDS(cbp7a_qc_s,fp(out,"cbp7a.rds"))
#cbp7b####
# cbp7b: 2-sample pool (ctrlM518 male + lgaF521 female), sex-marker demux,
# same procedure and thresholds as cbp7a.
sample<-"cbp7b"
umis<- Read10X("~/RUN/Run_554_10x_standard/Output/cellranger_count/single_cell_barcode_run_554_10xcbp7-b/outs/filtered_feature_bc_matrix/")
cbp7b_all <- CreateSeuratObject(counts = umis,project = sample)
cbp7b_all[["percent.mt"]] <- PercentageFeatureSet(object = cbp7b_all, pattern = "^MT-")
VlnPlot(object = cbp7b_all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"))
#take the 4 four median absolute deviations above the median
thr_mt<-median(cbp7b_all$percent.mt)+ 4*mad(cbp7b_all$percent.mt)
thr_ge<-median(cbp7b_all$nFeature_RNA)- 2*mad(cbp7b_all$nFeature_RNA)
thr_rn<-median(cbp7b_all$nCount_RNA)-2*mad(cbp7b_all$nCount_RNA)
p1<-VlnPlot(object = cbp7b_all, features ="percent.mt")+geom_hline(yintercept = thr_mt )
p2<-VlnPlot(object = cbp7b_all, features ="nCount_RNA")+geom_hline(yintercept = 35000)
p3<-VlnPlot(object = cbp7b_all, features ="nFeature_RNA")+geom_hline(yintercept = 6000 )
p1|p2|p3+plot_layout(guides = "collect")
ggsave(fp(out,"cbp7b_QC_cells_metrics.png"))
cbp7b_qc<-subset(cbp7b_all,percent.mt<thr_mt&nCount_RNA<35000&nFeature_RNA<6000)
#reassign samples
VlnPlot(cbp7b_qc,c("XIST","RPS4Y1"),group.by = "orig.ident")
sum(cbp7b_qc@assays$RNA@data["XIST",]>0)
sum(cbp7b_qc@assays$RNA@data["RPS4Y1",]>0)
sum(cbp7b_qc@assays$RNA@data["XIST",]>0&cbp7b_qc@assays$RNA@data["RPS4Y1",]>0) #419 doublet
# Sex-based assignment: both markers -> Doublet, XIST only -> lgaF521,
# RPS4Y1 only -> ctrlM518, neither -> Negative
cbp7b_qc@meta.data[cbp7b_qc@assays$RNA@data["XIST",]>0&
cbp7b_qc@assays$RNA@data["RPS4Y1",]>0,"sex.ID"]<-"Doublet"
cbp7b_qc@meta.data[cbp7b_qc@assays$RNA@data["XIST",]>0&
cbp7b_qc@assays$RNA@data["RPS4Y1",]==0,"sex.ID"]<-"lgaF521"
cbp7b_qc@meta.data[cbp7b_qc@assays$RNA@data["XIST",]==0&
cbp7b_qc@assays$RNA@data["RPS4Y1",]>0,"sex.ID"]<-"ctrlM518"
cbp7b_qc@meta.data[cbp7b_qc@assays$RNA@data["XIST",]==0&
cbp7b_qc@assays$RNA@data["RPS4Y1",]==0,"sex.ID"]<-"Negative"
table(cbp7b_qc@meta.data$sex.ID)
# ctrlM518 Doublet lgaF521 Negative
# 2009 469 3576 532
cbp7b_qc_s<-subset(cbp7b_qc,sex.ID!="Doublet"&sex.ID!="Negative")
cbp7b_qc_s$sample<-cbp7b_qc_s$sex.ID
table(cbp7b_qc_s$sample)
# ctrlM518 lgaF521
# 2009 3576
saveRDS(cbp7b_qc_s,fp(out,"cbp7b.rds"))
#cbp7c####
# cbp7c: single-donor run (ctrlM537) — QC filtering only, no demultiplexing.
sample<-"cbp7c"
umis<- Read10X("~/RUN/Run_554_10x_standard/Output/cellranger_count/single_cell_barcode_run_554_10xcbp7-c/outs/filtered_feature_bc_matrix/")
cbp7c_all <- CreateSeuratObject(counts = umis,project = sample)
cbp7c_all[["percent.mt"]] <- PercentageFeatureSet(object = cbp7c_all, pattern = "^MT-")
VlnPlot(object = cbp7c_all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"))
#take the 4 four median absolute deviations above the median
thr_mt<-median(cbp7c_all$percent.mt)+ 4*mad(cbp7c_all$percent.mt)
thr_ge<-median(cbp7c_all$nFeature_RNA)- 2*mad(cbp7c_all$nFeature_RNA)
thr_rn<-median(cbp7c_all$nCount_RNA)-2*mad(cbp7c_all$nCount_RNA)
p1<-VlnPlot(object = cbp7c_all, features ="percent.mt")+geom_hline(yintercept = thr_mt )
p2<-VlnPlot(object = cbp7c_all, features ="nCount_RNA")+geom_hline(yintercept = 35000)
p3<-VlnPlot(object = cbp7c_all, features ="nFeature_RNA")+geom_hline(yintercept = 6000 )
p1|p2|p3+plot_layout(guides = "collect")
ggsave(fp(out,"cbp7c_QC_cells_metrics.png"))
cbp7c_qc<-subset(cbp7c_all,percent.mt<thr_mt&nCount_RNA<35000&nFeature_RNA<6000)
cbp7c_qc #2823 cells
cbp7c_qc$sample<-"ctrlM537"
saveRDS(cbp7c_qc,fp(out,"cbp7c.rds"))
#### INTEGRATION ####
#based on Hematomap (see 01-Make_Hematomap )
# Map every per-sample QC'd object onto a reference "hematomap" with Seurat's
# reference-mapping workflow (FindTransferAnchors + MapQuery), then merge all
# queries into one object with shared ref.pca / ref.umap embeddings and
# transferred cell_type / lineage labels.
# Raise the future globals limit so SCTransform/mapping can serialize
# large objects across workers
options(future.globals.maxSize = 50000 * 1024^2)
out<-"outputs/06-integr_singlecell_cbps"
source("../methyl/scripts/utils/new_utils.R")
library(Seurat)
library(parallel)
hmap<-readRDS("outputs/04-make_hematomap/hematomap_ctrls_sans_stress.rds")
hmap
DefaultAssay(hmap)<-"integrated"
# NOTE(review): the annoy index is loaded from 05-make_hematomap while the
# hmap object comes from 04-make_hematomap — confirm these match
hmap[["pca.annoy.neighbors"]] <- LoadAnnoyIndex(object = hmap[["pca.annoy.neighbors"]], file = "outputs/05-make_hematomap/reftmp.idx")
# The 12 per-run RDS files produced by the QC sections above
cbps_run<-c("cbp0_ctrl","cbp0_lga",
paste0("cbp",2:4),
ps("cbp6",c("a","b","c")),ps("cbp7",c("a","b","c")),
"cbp8")
length(cbps_run)#12
cbps_list<-lapply(cbps_run, function(run)readRDS(fp(out,ps(run,".rds"))))
cbps_list
# Compute cell-cycle scores (if missing) and the S - G2M difference used
# as a regression covariate below
cbps_list<-mclapply(cbps_list,function(x){
#message("calculate CC.Difference for",x@project.name)
if(!"S.Score"%in%colnames(x@meta.data)){
x<-SCTransform(x,method = "glmGamPoi")
x <- CellCycleScoring(x,s.features = cc.genes$s.genes,
g2m.features = cc.genes$g2m.genes,
set.ident = TRUE,
search=TRUE)
}
x$CC.Difference <- x$S.Score - x$G2M.Score
return(x)
},mc.cores = 6)
# Re-run SCTransform regressing out mito content and cell-cycle difference
cbps_list<-mclapply(cbps_list, SCTransform,vars.to.regress=c("percent.mt","CC.Difference"),
return.only.var.genes=F,
method = "glmGamPoi",mc.cores = 6)
# Find transfer anchors between the reference and each query sample
anchors <- list()
for (i in 1:length(cbps_list)) {
anchors[[i]] <- FindTransferAnchors(
reference = hmap,
query = cbps_list[[i]],
k.filter = NA,
reference.reduction = "pca",
reference.neighbors = "pca.annoy.neighbors",
dims = 1:50
)
}
# Project each query into the reference PCA/UMAP and transfer labels
for (i in 1:length(cbps_list)) {
cbps_list[[i]] <- MapQuery(
anchorset = anchors[[i]],
query = cbps_list[[i]],
reference = hmap,
refdata = list(
cell_type = "cell_type",
lineage = "lineage"),
reference.reduction = "pca",
reduction.model = "ref.umap"
)
}
# Merge the queries
cbps <- merge(cbps_list[[1]], cbps_list[2:length(cbps_list)],merge.dr = c("ref.pca","ref.umap"))
p<-DimPlot(cbps, reduction = "ref.umap", group.by = "predicted.cell_type", label = TRUE, repel = TRUE, label.size = 3) + NoLegend()
ggsave(fp(out,"predicted_cell_type.png"),plot=p)
DimPlot(cbps, reduction = "ref.umap", group.by = "predicted.lineage", label = TRUE, repel = TRUE, label.size = 3) + NoLegend()
#add metadata
# Parse group/sex out of the sample IDs (e.g. "ctrlM555" -> ctrl, M)
cbps[["group"]]<-str_extract(cbps@meta.data$sample,"ctrl|lga|iugr")
cbps[["sex"]]<-str_extract(cbps@meta.data$sample,"M|F")
cbps[["group_sex"]]<-paste0(cbps@meta.data$group,cbps@meta.data$sex)
# hto = TRUE for runs demultiplexed with hashtag oligos
cbps$hto<-cbps$orig.ident%in%c("cbp2","cbp3","cbp4","cbp8")
cbps$batch<-cbps$orig.ident
cbps[["group_hto"]]<-paste0(cbps@meta.data$group,cbps@meta.data$hto)
cbps[["sample_hto"]]<-paste0(cbps@meta.data$sample,cbps@meta.data$hto)
# Samples with uncertain demultiplexing, flagged for later exclusion
cbps[["ambigous"]]<-cbps@meta.data$sample%in%c("iugrM558","lgaF559")
cbps[["cell_type_hmap"]]<-cbps$predicted.cell_type
cbps[["lineage_hmap"]]<-cbps$predicted.lineage
cbps$differentiated<-cbps$lineage_hmap%in%c("Mk/Er","18","DC","T cell","B cell")
#denovo umap
# De novo UMAP on the reference PCA space (as opposed to the projected ref.umap)
cbps <- RunUMAP(cbps, reduction = 'ref.pca', dims = 1:30,reduction.name = "denovo.umap",n.components = 2)
DimPlot(cbps, group.by = 'cell_type_hmap',reduction = "denovo.umap", label = TRUE)
DimPlot(cbps, group.by = 'lineage_hmap',reduction = "denovo.umap", label = TRUE)
saveRDS(cbps,fp(out,"cbps.rds"))
####CHECK INTEGR OK####
# Sanity checks on the integrated object: drop the unresolved "18" lineage,
# ambiguous samples, iugr cells and non-HTO runs, then tally samples/cells
# per group before saving the filtered object.
cbps_f<-subset(cbps,lineage_hmap!="18"&ambigous==F&group!="iugr"&hto==T)
rm(cbps)
# check that we kept everything expected
#all
cbps_f# 12685 cells
length(unique(cbps_f$sample)) #14 samples
#n samples by group
mtd<-data.table(cbps_f@meta.data,keep.rownames = "bc")
mts<-unique(mtd,by=c("sample_hto"))
table(mts$hto,mts$group)
# ctrl lga
# TRUE 8 6
#n of cells
table(mtd$hto,mtd$group)
# ctrl lga
# TRUE 5823 6861
#n of HSC
table(mtd[lineage_hmap=="HSC"]$hto,mtd[lineage_hmap=="HSC"]$group)
# ctrl lga
# TRUE 2075 1903
saveRDS(cbps_f,fp(out,"cbps_filtered.rds"))
# check assignment quality: prediction-score distribution per lineage
VlnPlot(cbps_f,"predicted.lineage.score",group.by = "predicted.lineage",pt.size = 0)
# beware of HSC2, 3 and 4, GMP cycle, and MPP Ery (low prediction scores)
|
53e69363c5d3331cca83570097677fb428d90ecf | 6d6996ac49c162217dfaf177d7c403402ebc312c | /Part9_Dimensionality_Reduction/Section34_Principal_Component_Analysis_PCA/pca.R | 23941bd862e73d91de47c96677b31dfaf0181ad7 | [] | no_license | skols/machine-learning-a-to-z | 8b8c777f53a7d04ad672dd676e0527472c5c48ba | 524de4c65240f4a1ad961377d10623bf4023e1cb | refs/heads/master | 2021-07-12T16:33:51.608303 | 2017-10-15T15:57:45 | 2017-10-15T15:57:45 | 103,762,593 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,873 | r | pca.R | # PCA - Principal Component Analysis - Unsupervised (DV is not considered)
# Importing the dataset (Wine: 13 numeric features + Customer_Segment label in column 14)
setwd("C:/Development/Courses/Kirill Eremenko Data Science Courses/Machine_Learning_A-Z/Part9_Dimensionality_Reduction/Section34_Principal_Component_Analysis_PCA")
dataset <- read.csv("Wine.csv")
# Splitting the dataset into Training set and Test set (80/20, stratified on the label)
library(caTools)
set.seed(123) # like random_state
split <- sample.split(dataset$Customer_Segment, SplitRatio=0.8) # for training set
training_set <- subset(dataset, split == TRUE)
test_set <- subset(dataset, split == FALSE)
# Feature Scaling (exclude column 14, the class label)
training_set[-14] <- scale(training_set[-14])
test_set[-14] <- scale(test_set[-14])
# Applying PCA: project the 13 scaled features onto the first 2 principal components
library(caret)
library(e1071)
pca <- preProcess(x=training_set[-14], method="pca", pcaComp=2)
training_set <- predict(pca, training_set)
training_set <- training_set[c(2, 3, 1)]  # reorder to PC1, PC2, Customer_Segment
test_set <- predict(pca, test_set)
test_set <- test_set[c(2, 3, 1)]
# Fitting a linear SVM to the Training set
library(e1071)
classifier <- svm(formula=Customer_Segment ~ .,
                  data=training_set,
                  type="C-classification",
                  kernel="linear")
# Predicting the Test set results (drop the label column before predicting)
y_pred <- predict(classifier, newdata=test_set[-3])
# Making the Confusion Matrix (truth vs prediction)
cm <- table(test_set[, 3], y_pred)
# Visualising the Training set results - Points are truth and region is prediction
# install.packages("ElemStatLearn")
library(ElemStatLearn)
set <- training_set
X1 <- seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by=0.01)
X2 <- seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by=0.01)
grid_set <- expand.grid(X1, X2)
colnames(grid_set) <- c("PC1", "PC2")
y_grid <- predict(classifier, newdata=grid_set)
plot(set[, -3],
     main="SVM (Training Set)",
     xlab="PC1", ylab="PC2",
     xlim=range(X1), ylim=range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add=TRUE)
points(grid_set, pch=".", col=ifelse(y_grid==2, "deepskyblue", ifelse(y_grid==1, "springgreen3", "tomato")))
points(set, pch=21, bg=ifelse(set[, 3]==2, "blue3", ifelse(set[, 3]==1, "green4", "red3")))
# Visualising the Test set results - Points are truth and region is prediction
# library(ElemStatLearn)
set <- test_set
X1 <- seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by=0.01)
X2 <- seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by=0.01)
grid_set <- expand.grid(X1, X2)
colnames(grid_set) <- c("PC1", "PC2")
y_grid <- predict(classifier, newdata=grid_set)
plot(set[, -3],
     main="SVM (Test Set)",
     xlab="PC1", ylab="PC2",  # FIX: y axis is the second principal component (was mislabelled "PC1")
     xlim=range(X1), ylim=range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add=TRUE)
points(grid_set, pch=".", col=ifelse(y_grid==2, "deepskyblue", ifelse(y_grid==1, "springgreen3", "tomato")))
points(set, pch=21, bg=ifelse(set[, 3]==2, "blue3", ifelse(set[, 3]==1, "green4", "red3")))
|
2fad9995b3c2fb1a52330285c6ee853791bfa6bd | e1cbbf8791b0ac6d40f6d5b397785560105441d9 | /man/add.log.axis.Rd | 888d0aa8b527caa5bc72ab0e4be59e50f8fb9341 | [] | no_license | wasquith/lmomco | 96a783dc88b67017a315e51da3326dfc8af0c831 | 8d7cc8497702536f162d7114a4b0a4ad88f72048 | refs/heads/master | 2023-09-02T07:48:53.169644 | 2023-08-30T02:40:09 | 2023-08-30T02:40:09 | 108,880,810 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,939 | rd | add.log.axis.Rd | \name{add.log.axis}
\alias{add.log.axis}
\title{Add a Polished Logarthimic Axis to a Plot}
\description{
This function provides special support for adding superior-looking base-10 logarithmic axes relative to \bold{R}'s defaults, which are an embarrassment. The \bold{Examples} section shows an overly elaborate version made by repeated calls to this function, with the drawback that each call redraws the line of the axis, so deletion in editing software might be required. This function is indexed under the \dQuote{lmomco functions} because of its relation to \code{\link{add.lmomco.axis}} and is not named \code{add.lmomcolog.axis} because such a name is too cumbersome.
}
\usage{
add.log.axis(make.labs=FALSE, logs=c(2, 3, 4, 5, 6, 7, 8, 9), side=1,
two.sided=FALSE, label=NULL, x=NULL, col.ticks=1, ...)
}
\arguments{
\item{make.labs}{A logical controlling whether the axis is labeled according to the values in \code{logs}.}
\item{logs}{A numeric vector of log-cycles for which ticking and (or) labeling is made. These are normalized to the first log-cycle, so a value of \eqn{3} would spawn values such as \eqn{\cdots, 0.03, 0.3, 3, 30, \cdots} through a range exceeding the axis limits. The default anticipates that a second call to the function will be used to make longer ticks at the even log-cycles; hence, the value 1 is not in the default vector. The \bold{Examples} section provides a thorough demonstration.}
\item{side}{An integer specifying which side of the plot the axis is to be drawn on; it corresponds to the \code{side} argument of the \code{axis} function. The axis is placed as follows: 1=below, 2=left, 3=above, and 4=right.}
\item{two.sided}{A logical controlling whether the side opposite \code{side} is also to be drawn.}
\item{label}{The label (title) of the axis, which is placed by a call to function \code{mtext}, and thus either the \code{xlab} or \code{ylab} arguments for \code{plot} should be set to the empty string \code{""}.}
\item{x}{This is an optional data vector (untransformed!), which will compute nice axis limits and return them. These limits will align with (snap to) the integers within a log10-cycle.}
\item{col.ticks}{This is the same argument as the \code{axis} function.}
\item{...}{Additional arguments to pass to \code{axis}.}
}
\value{
No value is returned, except if argument \code{x} is given, for which nice axis limits are returned. By overall design, this function is used for its side effects.
}
\author{W.H. Asquith}
\seealso{\code{\link{add.lmomco.axis}} }
\examples{
\dontrun{
par(mgp=c(3,0.5,0)) # going to tick to the inside, change some parameters
X <- 10^sort(rnorm(65)); pp <- pp(X) # generate synthetic data
ylim <- add.log.axis(x=X) # snap to some nice integers within the cycle
plot(qnorm(pp), X, xaxt="n", yaxt="n", xlab="", ylab="", log="y",
xlim=c(-2,3), ylim=ylim, pch=6, yaxs="i", col=4)
add.lmomco.axis(las=2, tcl=0.5, side.type="RI", otherside.type="NPP")
# Logarithmic axis: the base ticks to show logarithms
add.log.axis(side=2, tcl=0.8*abs(par()$tcl), two.sided=TRUE)
# the long even-cycle tick, set to inside and outside
add.log.axis(logs=c(1), tcl=-0.5*abs(par()$tcl), side=2, two.sided=TRUE)
add.log.axis(logs=c(1), tcl=+1.3*abs(par()$tcl), side=2, two.sided=TRUE)
# now a micro tick at the 1.5 logs but only on the right
add.log.axis(logs=c(1.5), tcl=+0.5*abs(par()$tcl), side=4)
# and only label the micro tick at 1.5 on the right
add.log.axis(logs=c(1.5), side=4, make.labs=TRUE, las=3) # but weird rotate
# add the bulk tick labeling and axis label.
add.log.axis(logs=c(1, 2, 3, 4, 6), side=2, make.labs=TRUE, las=1, label="QUANTILE")
par(mgp=c(3,1,0)) # restore defaults}
}
\keyword{utility (logarithmic)}
\keyword{axis (utility)}
\keyword{Graphics}
\concept{logarithmic axis}
\concept{log axis}
\keyword{The lmomco functions}
|
6c54be881c1971286bf99b5d3df660d2951500d1 | 10fdbe892deaceeca599af2a9f0aa93b83f44086 | /run_analysis.R | dc5d9d30f6417dafe480e5baa53a46a511438eff | [] | no_license | interzone2001/getcleandata | b6ad3b058c7d39b0671bb540c25165588853de5e | 0b33f871891dd15d97895fc270d462833013eae6 | refs/heads/master | 2021-01-20T03:22:00.499407 | 2017-04-26T21:19:49 | 2017-04-26T21:19:49 | 89,527,475 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,156 | r | run_analysis.R | library(tidyverse)
# Tidy the UCI HAR smartphone dataset: merge the test and training sets, keep
# only mean()/std() features, label activities, and write the average of every
# feature for each Subject/Activity combination.

# Read in the test set: measurements, activity codes, and subject IDs
x_test <- read.table('X_test.txt', stringsAsFactors = FALSE)
y_test <- read.table('y_test.txt', stringsAsFactors = FALSE)
subject_test <- read.table('subject_test.txt', stringsAsFactors = FALSE)
# Read in the training set: measurements, activity codes, and subject IDs
x_train <- read.table('X_train.txt', stringsAsFactors = FALSE)
y_train <- read.table('y_train.txt', stringsAsFactors = FALSE)
subject_train <- read.table('subject_train.txt', stringsAsFactors = FALSE)

# Read in the list of feature names to use as measurement column headers
features <- read.table('features.txt', stringsAsFactors = FALSE)
# Keep only the column with the variable names, as a character vector
# (renamed from `head` to avoid shadowing base::head)
feature_names <- as.character(t(subset(features, select = V2)))

# Apply descriptive column names to each piece
colnames(x_test) <- feature_names
colnames(x_train) <- feature_names
colnames(y_test) <- c('Activity')   # more descriptive variable name
colnames(y_train) <- c('Activity')
colnames(subject_test) <- c('Subject')
colnames(subject_train) <- c('Subject')

# Combine subject IDs, activity codes, and measurements for each set
# (column order: Subject, Activity, features)
xy_test <- cbind(subject_test, y_test, x_test)
xy_train <- cbind(subject_train, y_train, x_train)

# Stack the test and training sets into a single data frame
xy_test_train <- rbind(xy_test, xy_train)

# Select the ID columns plus features that are means or standard deviations
xy_tt_final <- xy_test_train[ , grep("mean\\(\\)|std\\(\\)|Subject|Activity", colnames(xy_test_train))]

# Replace numeric activity codes (1-6) with descriptive labels from
# activity_labels.txt via a lookup vector
activity_labels <- c("Walking", "Walking Upstairs", "Walking Downstairs",
                     "Sitting", "Standing", "Laying")
xy_tt_final$Activity <- activity_labels[xy_tt_final$Activity]

# Group by Subject and Activity and average every remaining column
tidy_df <- xy_tt_final %>%
  group_by(Subject, Activity) %>%
  summarise(across(everything(), mean))

# Write out the tidy summary data frame for uploading
write.table(tidy_df, "UCIHAR tidy.txt")
|
c5f803fd766153cb3a96aae9515d0a70d68db60f | fedf6e987e8c945d8137f929d2d9fcfde85d4e5e | /man/inferGenotypeBayesian.Rd | d92a4643768a45a36e083e74f7f0ad1dfb312e3d | [] | no_license | cran/tigger | e34468f5a6642230099f46bbe1d2fd275365934b | 8b2e2afbd9069e0d4fd52e27e8b916716c71a565 | refs/heads/master | 2022-10-07T22:50:30.430691 | 2022-09-20T20:50:02 | 2022-09-20T20:50:02 | 60,682,634 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 5,328 | rd | inferGenotypeBayesian.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bayesian.R
\name{inferGenotypeBayesian}
\alias{inferGenotypeBayesian}
\title{Infer a subject-specific genotype using a Bayesian approach}
\usage{
inferGenotypeBayesian(
data,
germline_db = NA,
novel = NA,
v_call = "v_call",
seq = "sequence_alignment",
find_unmutated = TRUE,
priors = c(0.6, 0.4, 0.4, 0.35, 0.25, 0.25, 0.25, 0.25, 0.25)
)
}
\arguments{
\item{data}{a \code{data.frame} containing V allele
calls from a single subject. If \code{find_unmutated}
is \code{TRUE}, then the sample IMGT-gapped V(D)J sequence
should be provided in column \code{sequence_alignment}}
\item{germline_db}{named vector of sequences containing the
germline sequences named in \code{allele_calls}.
Only required if \code{find_unmutated} is \code{TRUE}.}
\item{novel}{an optional \code{data.frame} of the type
novel returned by \link{findNovelAlleles} containing
germline sequences that will be utilized if
\code{find_unmutated} is \code{TRUE}. See Details.}
\item{v_call}{column in \code{data} with V allele calls.
Default is \code{"v_call"}.}
\item{seq}{name of the column in \code{data} with the
aligned, IMGT-numbered, V(D)J nucleotide sequence.
Default is \code{"sequence_alignment"}.}
\item{find_unmutated}{if \code{TRUE}, use \code{germline_db} to
find which samples are unmutated. Not needed
if \code{allele_calls} only represent
unmutated samples.}
\item{priors}{a numeric vector of priors for the multinomial distribution.
The \code{priors} vector must be nine values that defined
the priors for the heterozygous (two allele),
trizygous (three allele), and quadrozygous (four allele)
distributions. The first two values of \code{priors} define
the prior for the heterozygous case, the next three values are for
the trizygous case, and the final four values are for the
quadrozygous case. Each set of priors should sum to one.
Note, each distribution prior is actually defined internally
by set of four numbers, with the unspecified final values
assigned to \code{0}; e.g., the heterozygous case is
\code{c(priors[1], priors[2], 0, 0)}. The prior for the
homozygous distribution is fixed at \code{c(1, 0, 0, 0)}.}
}
\value{
A \code{data.frame} of alleles denoting the genotype of the subject with the log10
of the likelihood of each model and the log10 of the Bayes factor. The output
contains the following columns:
\itemize{
\item \code{gene}: The gene name without allele.
\item \code{alleles}: Comma separated list of alleles for the given \code{gene}.
\item \code{counts}: Comma separated list of observed sequences for each
corresponding allele in the \code{alleles} list.
\item \code{total}: The total count of observed sequences for the given \code{gene}.
\item \code{note}: Any comments on the inference.
\item \code{kh}: log10 likelihood that the \code{gene} is homozygous.
\item \code{kd}: log10 likelihood that the \code{gene} is heterozygous.
\item \code{kt}: log10 likelihood that the \code{gene} is trizygous
\item \code{kq}: log10 likelihood that the \code{gene} is quadrozygous.
\item \code{k_diff}: log10 ratio of the highest to second-highest zygosity likelihoods.
}
}
\description{
\code{inferGenotypeBayesian} infers a subject's genotype by applying a Bayesian framework
with a Dirichlet prior for the multinomial distribution. Up to four distinct alleles are
allowed in an individual’s genotype. Four likelihood distributions were generated by
empirically fitting three high coverage genotypes from three individuals
(Laserson and Vigneault et al, 2014). A posterior probability is calculated for the
four most common alleles. The certainty of the highest probability model was
calculated using a Bayes factor (the most likely model divided by second-most likely model).
The larger the Bayes factor (K), the greater the certainty in the model.
}
\details{
Allele calls representing cases where multiple alleles have been
assigned to a single sample sequence are rare among unmutated
sequences but may result if nucleotides for certain positions are
not available. Calls containing multiple alleles are treated as
belonging to all groups. If \code{novel} is provided, all
sequences that are assigned to the same starting allele as any
novel germline allele will have the novel germline allele appended
to their assignment prior to searching for unmutated sequences.
}
\note{
This method works best with data derived from blood, where a large
portion of sequences are expected to be unmutated. Ideally, there
should be hundreds of allele calls per gene in the input.
}
\examples{
# Infer IGHV genotype, using only unmutated sequences, including novel alleles
inferGenotypeBayesian(AIRRDb, germline_db=SampleGermlineIGHV, novel=SampleNovel,
find_unmutated=TRUE, v_call="v_call", seq="sequence_alignment")
}
\references{
\enumerate{
\item Laserson U and Vigneault F, et al. High-resolution antibody dynamics of
vaccine-induced immune responses. PNAS. 2014 111(13):4928-33.
}
}
\seealso{
\link{plotGenotype} for a colorful visualization and
\link{genotypeFasta} to convert the genotype to nucleotide sequences.
See \link{inferGenotype} to infer a subject-specific genotype using
a frequency method
}
|
8afc509a6e23c16d4250838c7f1e694aa815cfdd | 04da270e474aa91a8d4660685ec62c6388646276 | /man/fpl_get_player_all.Rd | 5323977608456eee5b465459d17c24dfa51166bf | [] | no_license | maskegger/fplr | e4a6a2fa296fbda4dbcb6dbe301d7eb3053fde69 | a78418630fd60707ad04794aea48ee5cf3a47d5f | refs/heads/master | 2022-04-17T21:05:46.504678 | 2020-04-13T14:18:56 | 2020-04-13T14:18:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 446 | rd | fpl_get_player_all.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/players.R
\name{fpl_get_player_all}
\alias{fpl_get_player_all}
\title{Get data on all players}
\usage{
fpl_get_player_all()
}
\value{
a tibble
}
\description{
Retrieve player data for the current FPL season, obtained via the
\href{https://fantasy.premierleague.com/api/bootstrap-static}{bootstrap-static API endpoint}.
}
\examples{
\donttest{
fpl_get_player_all()
}
}
|
f775ffbc5373d11cc387ba172da7cf5c906d7cbe | 19667baea785a5181c29daaea9799435107e91c3 | /data/r/dfd846852499df6dcf35d9db1273408d_forecastTBATS.R | 386dc2efdc2b81e0a958d49fb7e0f123c5e066a5 | [
"Apache-2.0"
] | permissive | maxim5/code-inspector | d71f024390fe5a5f7c1144b289e0e22b59dbb2c7 | 14812dfbc7bac1d76c4d9e5be2cdf83fc1c391a1 | refs/heads/master | 2021-09-10T19:23:39.324768 | 2018-03-31T17:17:27 | 2018-03-31T17:17:27 | 115,271,942 | 5 | 2 | null | null | null | null | UTF-8 | R | false | false | 7,066 | r | dfd846852499df6dcf35d9db1273408d_forecastTBATS.R | #' @rdname forecast.bats
#' @export
forecast.tbats <- function(object, h, level=c(80, 95), fan=FALSE, biasadj=NULL, ...) {
  # Redirect if this was called on a plain BATS model by mistake
  if (identical(class(object), "bats")) {
    return(forecast.bats(object, h, level, fan, biasadj, ...))
  }
  # Working frequency of the series (largest seasonal period if y is not a ts)
  if (inherits(object$y, "ts")) {
    ts.frequency <- frequency(object$y)
  } else {
    ts.frequency <- if (!is.null(object$seasonal.periods)) max(object$seasonal.periods) else 1
  }
  # Default horizon: two full seasonal cycles (10 steps for nonseasonal data)
  if (missing(h)) {
    if (is.null(object$seasonal.periods)) {
      h <- if (ts.frequency == 1) 10 else 2 * ts.frequency
    } else {
      h <- 2 * max(object$seasonal.periods)
    }
  } else if (h <= 0) {
    stop("Forecast horizon out of bounds")
  }
  # Prediction-interval levels: 'fan' gives fan-chart levels; levels supplied
  # as fractions in (0, 1) are rescaled to percentages
  if (fan) {
    level <- seq(51, 99, by = 3)
  } else {
    if (min(level) > 0 && max(level) < 1) {
      level <- 100 * level
    } else if (min(level) < 0 || max(level) > 99.99) {
      stop("Confidence limit out of range")
    }
  }
  # tau = number of trigonometric seasonal states (two per Fourier pair)
  if (!is.null(object$k.vector)) {
    tau <- 2 * sum(object$k.vector)
  } else {
    tau <- 0
  }
  x <- matrix(0, nrow = nrow(object$x), ncol = h)  # future state vectors
  y.forecast <- numeric(h)
  adj.beta <- if (!is.null(object$beta)) 1 else 0  # extra state slot when a trend is present
  # Build the measurement row vector w, seasonal smoothing row gamma.bold,
  # error-loading column g, and transition matrix F of the state space form
  w <- .Call("makeTBATSWMatrix", smallPhi_s = object$damping.parameter, kVector_s = as.integer(object$k.vector), arCoefs_s = object$ar.coefficients, maCoefs_s = object$ma.coefficients, tau_s = as.integer(tau), PACKAGE = "forecast")
  if (!is.null(object$seasonal.periods)) {
    gamma.bold <- matrix(0, nrow = 1, ncol = tau)
    .Call("updateTBATSGammaBold", gammaBold_s = gamma.bold, kVector_s = as.integer(object$k.vector), gammaOne_s = object$gamma.one.v, gammaTwo_s = object$gamma.two.v, PACKAGE = "forecast")
  } else {
    gamma.bold <- NULL
  }
  g <- matrix(0, nrow = (tau + 1 + adj.beta + object$p + object$q), ncol = 1)
  if (object$p != 0) {
    g[(1 + adj.beta + tau + 1), 1] <- 1  # first AR state loads the error
  }
  if (object$q != 0) {
    g[(1 + adj.beta + tau + object$p + 1), 1] <- 1  # first MA state loads the error
  }
  .Call("updateTBATSGMatrix", g_s = g, gammaBold_s = gamma.bold, alpha_s = object$alpha, beta_s = object$beta.v, PACKAGE = "forecast")
  F <- makeTBATSFMatrix(alpha = object$alpha, beta = object$beta, small.phi = object$damping.parameter, seasonal.periods = object$seasonal.periods, k.vector = as.integer(object$k.vector), gamma.bold.matrix = gamma.bold, ar.coefs = object$ar.coefficients, ma.coefs = object$ma.coefficients)
  # Iterate the state recursion forward: x[, t] = F x[, t-1] with point
  # forecasts yhat[t] = w' x[, t-1]
  y.forecast[1] <- w$w.transpose %*% object$x[, ncol(object$x)]
  x[, 1] <- F %*% object$x[, ncol(object$x)]
  if (h > 1) {
    for (t in 2:h) {
      x[, t] <- F %*% x[, (t - 1)]
      y.forecast[t] <- w$w.transpose %*% x[, (t - 1)]
    }
  }
  ## Prediction intervals: Var[t] = sigma^2 * (1 + sum_{j=1}^{t-1} c_j^2)
  ## with c_j = w' F^(j-1) g
  lower.bounds <- upper.bounds <- matrix(NA, ncol = length(level), nrow = h)
  variance.multiplier <- numeric(h)
  variance.multiplier[1] <- 1
  if (h > 1) {
    for (j in 1:(h - 1)) {
      if (j == 1) {
        f.running <- diag(ncol(F))
      } else {
        f.running <- f.running %*% F
      }
      c.j <- w$w.transpose %*% f.running %*% g
      variance.multiplier[(j + 1)] <- variance.multiplier[j] + c.j ^ 2
    }
  }
  variance <- object$variance * variance.multiplier
  st.dev <- sqrt(variance)
  for (i in seq_along(level)) {
    marg.error <- st.dev * abs(qnorm((100 - level[i]) / 200))
    lower.bounds[, i] <- y.forecast - marg.error
    upper.bounds[, i] <- y.forecast + marg.error
  }
  # Back-transform forecasts and intervals if a Box-Cox transform was used
  if (!is.null(object$lambda)) {
    y.forecast <- InvBoxCox(y.forecast, object$lambda, biasadj, list(level = level, upper = upper.bounds, lower = lower.bounds))
    lower.bounds <- InvBoxCox(lower.bounds, object$lambda)
    if (object$lambda < 1) {
      lower.bounds <- pmax(lower.bounds, 0)  # keep bounds non-negative on the original scale
    }
    upper.bounds <- InvBoxCox(upper.bounds, object$lambda)
  }
  ## Calculate the start time for the forecast period (one step past the data)
  start.time <- start(object$y)
  y <- ts(c(object$y, 0), start = start.time, frequency = ts.frequency)
  fcast.start.time <- end(y)
  # Return historical, fitted, and forecast series as msts objects; the
  # seasonal-periods choice is hoisted out of the five repeated calls
  periods <- if (!is.null(object$seasonal.periods)) object$seasonal.periods else ts.frequency
  x <- msts(object$y, seasonal.periods = periods, ts.frequency = ts.frequency, start = start.time)
  fitted.values <- msts(object$fitted.values, seasonal.periods = periods, ts.frequency = ts.frequency, start = start.time)
  y.forecast <- msts(y.forecast, seasonal.periods = periods, ts.frequency = ts.frequency, start = fcast.start.time)
  upper.bounds <- msts(upper.bounds, seasonal.periods = periods, ts.frequency = ts.frequency, start = fcast.start.time)
  lower.bounds <- msts(lower.bounds, seasonal.periods = periods, ts.frequency = ts.frequency, start = fcast.start.time)
  colnames(upper.bounds) <- colnames(lower.bounds) <- paste0(level, "%")
  forecast.object <- list(
    model = object, mean = y.forecast, level = level, x = x, series = object$series,
    upper = upper.bounds, lower = lower.bounds, fitted = fitted.values,
    method = as.character(object), residuals = object$errors
  )
  if (is.null(object$series)) {
    forecast.object$series <- deparse(object$call$y)
  }
  class(forecast.object) <- "forecast"
  return(forecast.object)
}
#' @export
as.character.tbats <- function(x, ...) {
  # Builds the printable model label TBATS(omega, {p,q}, phi, {<m1,k1>, ...})
  # where omega is the Box-Cox parameter, p/q are the ARMA orders, phi is the
  # damping parameter, and <m,k> are seasonal period / Fourier order pairs.
  # Missing components render as "1" (lambda), "0" (ARMA orders) or "-".
  # Uses paste0() throughout instead of paste(..., sep = "").
  boxcox <- if (!is.null(x$lambda)) round(x$lambda, digits = 3) else "1"
  p <- if (!is.null(x$ar.coefficients)) length(x$ar.coefficients) else "0"
  q <- if (!is.null(x$ma.coefficients)) length(x$ma.coefficients) else "0"
  damping <- if (!is.null(x$damping.parameter)) {
    paste0(round(x$damping.parameter, digits = 3), ",")
  } else {
    "-,"
  }
  seasonal <- if (!is.null(x$seasonal.periods)) {
    # One "<period,k>" pair per seasonal component, comma-separated
    pairs <- paste0("<", round(x$seasonal.periods, 2), ",", x$k.vector, ">", collapse = ", ")
    paste0(" {", pairs, "})")
  } else {
    "{-})"
  }
  paste0("TBATS(", boxcox, ", {", p, ",", q, "}, ", damping, seasonal)
}
a9c7e4190b5d3cd7be055889912b7e0e89ef1fd2 | 82fee0c3a5fc6467717f8145012c770eeaa5ac24 | /Labs/Lab04/BARFIELD_Lab04.R | 804ad380b2a6159b731e045421ccdca647061ab3 | [] | no_license | amybarfield/CompBioLabsAndHomework | 5adf59ee6d645ba63ec529744259eb0f535fa9e9 | 013ea1464bb3ed843ef2cac56272b60788905964 | refs/heads/master | 2020-04-18T15:04:22.673070 | 2019-05-04T04:19:45 | 2019-05-04T04:19:45 | 167,604,062 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,525 | r | BARFIELD_Lab04.R | ##Part I##
## Part I ##

# Step 1: print "hi" ten times
for (i in seq_len(10)) {
  print("hi")
}

# Step 2: how much money Tim has after 8 weeks of allowance minus gum
cash <- 10          # Tim's starting money
allowance <- 5      # amount of money Tim gets weekly
gum <- 2 * 1.34     # weekly cost of 2 packs of gum
weeks <- 1:8
for (i in seq_along(weeks)) {
  cash <- cash + allowance - gum
  print(cash)
}

# Step 3: a population of 2000 shrinks by 5% every year, for 7 years
pop_init <- 2000     # starting population
shrink_rate <- 0.05  # proportional yearly decline
for (year in 1:7) {
  pop_init <- pop_init - (pop_init * shrink_rate)
  print(pop_init)
}

# Step 4: discrete-time logistic growth equation
#   n[t] = n[t-1] + r * n[t-1] * (K - n[t-1]) / K
# where n[t] is the abundance at time t, r is the intrinsic growth rate,
# and K is the environmental carrying capacity.
K <- 10000
r <- 0.8
n <- rep(2500, 12)   # n[1] = 2500 is the starting abundance
for (t in 2:12) {
  n[t] <- n[t-1] + (r * n[t-1] * (K - n[t-1]) / K)
}
print(n)

## Part II ##

# Step 5
# 5a: a vector of 18 zeros (renamed from `vector` to avoid shadowing base::vector)
multiples_of_3 <- rep(0, 18)
# 5b: fill it so entry i equals 3 * i
for (i in seq_len(18)) {
  multiples_of_3[i] <- 3 * i
}
multiples_of_3
# 5c: a new vector of zeros whose first entry is 1
recurrence_vec <- rep(0, 18)
recurrence_vec[1] <- 1
recurrence_vec
# 5d: from the second entry on, each value is one plus twice the previous entry
for (i in 2:18) {
  recurrence_vec[i] <- 1 + (2 * recurrence_vec[i-1])
}
recurrence_vec

# Step 6: the first 20 numbers of a Fibonacci sequence
# BUG FIX: the original loop assigned fibo[i+1] for i in 2:20, growing the
# vector to 21 elements; filling fibo[i] for i in 3:20 gives exactly 20.
fibo <- rep(0, 20)  # fibo[1] stays 0
fibo[2] <- 1        # second number in the sequence is 1
for (i in 3:20) {
  fibo[i] <- fibo[i-1] + fibo[i-2]
}
fibo

# Step 7: discrete-time logistic growth again, plotted against time
# abundance[t] = abundance[t-1] + r * abundance[t-1] * (K - abundance[t-1]) / K
K <- 10000           # environmental carrying capacity
r <- 0.8             # intrinsic growth rate
time <- 1:12
abundance <- rep(2500, 12)  # abundance[t] is the population at time t
for (t in 2:12) {
  abundance[t] <- abundance[t-1] + (r * abundance[t-1] * (K - abundance[t-1]) / K)
}
print(abundance)
# make a plot of abundance vs time (quartz() opens a graphics window on macOS)
quartz()
plot(time, abundance)
|
c894cf8dfbe941237b01a4f96da5eebee9fe151d | 209b83dc4e85f63caa7a714ffc6a61948a9a0c05 | /tests/testthat/testmanyttestsIS.R | d76f79d1d496c9d3f44a6c2ffec3f423bcb839a6 | [] | no_license | MAgojam/manytee | 96e67e8a39e259f83f303f08fa68c4f42d48c8ef | 201e06944813ccf9bf5f0475e10580a970529760 | refs/heads/master | 2022-10-15T10:02:51.995261 | 2020-06-10T22:13:08 | 2020-06-10T22:13:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,901 | r | testmanyttestsIS.R |
# Tests for manyttestsIS(): runs independent-samples t-tests for every
# combination of levels of the grouping variables.
context('manyttestsIS')
test_that('manyttestsIS sunny works', {
# Happy path: 100 normal observations with two crossed grouping factors
# (x1 in A/B, x2 in a/b/c); seed fixed so group membership is reproducible.
set.seed(1337)
N <- 100
df <- data.frame(
y = rnorm(N),
x1 = sample(LETTERS[1:2], size = N, replace = TRUE),
x2 = sample(letters[1:3], size = N, replace = TRUE)
)
result <- manyttestsIS(df, vars(y), vars(x1, x2),
corMethod = 'none', effectSize = TRUE,
ciES = TRUE)
# asDF flattens the results table; one row per pairwise comparison
table <- result$tests$asDF
# Select one test
row <- table[table$x11=='A' & table$x21=='a' & table$x12=='B' & table$x22=='b',]
# Perform t-test + effect size using R packages for these groups
group1 <- df[df$x1 == 'A' & df$x2 == 'a',]$y
group2 <- df[df$x1 == 'B' & df$x2 == 'b',]$y
# NOTE(review): `t` here masks the base transpose function t() inside the test
t <- t.test(group1, group2, var.equal = TRUE)
es <- effsize::cohen.d(group1, group2)
# Test t-statistics
expect_equivalent(row$t, t$statistic, tolerance = 1e-3)
expect_equivalent(row$df, t$parameter, tolerance = 1e-3)
expect_equivalent(row$p, t$p.value, tolerance = 1e-3)
# Test Effect Size + CI
expect_equivalent(row$es, es$estimate, tolerance = 1e-3)
expect_equivalent(row$ciles, es$conf.int[1], tolerance = 1e-3)
expect_equivalent(row$ciues, es$conf.int[2], tolerance = 1e-3)
})
test_that('manyttestsIS rainy works', {
# Edge case: a hand-built data frame with non-syntactic column names
# ('y 1', 'x 1') and badly unbalanced groups (sizes 4, 1, 1).
df <- list(
'y 1' = c(1, 2, 2, 1, 3, 4),
'x 1' = c('A', 'A', 'A', 'A', 'B 2', 'C')
)
# Manually promote the list to a data frame without name mangling
attr(df, 'row.names') <- seq_len(length(df[[1]]))
attr(df, 'class') <- 'data.frame'
result <- manyttestsIS(df, 'y 1', 'x 1')
table <- result$tests$asDF
# Reference: t-test of group A (n=4) against the singleton group 'B 2'
r1 <- t.test(c(1,2,2,1), 3, var.equal = TRUE)
# Test t-statistics
expect_equivalent(table$t[1], r1$statistic, tolerance = 1e-3)
expect_equivalent(table$p[1], r1$p.value, tolerance = 1e-3)
# NOTE(review): df[3] indexes a third column that does not exist (df has
# two columns), so is.nan(df[3]) looks like it exercises an error/condition
# path rather than a NaN check -- confirm the intended expectation.
testthat::expect_condition(is.nan(df[3]))
})
|
d05f3c6faf4489e067c52e2dd9fa9766cb3b910c | f4f8dcc179c9516dab48bbbb7455a75f2e7c08e7 | /resources/analytics/Portuguese/ui.R | 0fedfc9fda4f7974aad0c1a325ac79262e880a0d | [
"MIT"
] | permissive | manuelvarzen/danielmarcelino.github.io | 7f755e2bc872d421ceb15f85cc38b18b98ae2291 | 35def12cf9ceee46315a994bb4c265bcd1cc6bbc | refs/heads/master | 2020-03-07T12:53:00.023144 | 2017-11-12T17:14:53 | 2017-11-12T17:14:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,159 | r | ui.R | # Example borrowed from jcheng5 @ github
library(shiny)
library(ggplot2)
# The demo data set explored by this UI (ggplot2's diamonds data)
dataset <- diamonds
# NOTE(review): shinyUI() and pageWithSidebar() are legacy Shiny APIs;
# modern code would use fluidPage(sidebarLayout(...)). Kept as-is because
# migrating changes the rendered layout -- confirm before modernizing.
shinyUI(pageWithSidebar(
# Title appearing at the top of the page
headerPanel("Diamonds Explorer"),
# The sidebarPanel specifies the options presented to the user.
# Inputs come in various forms. In this example we have a slider bar
# for specifying sample size, 5 dropdown menus for selecting variables from
# the dataset, and two checkboxes for specifying point jitter and smoothing.
sidebarPanel(
# This code produces a slider bar through which the user can specify
# the input$sampleSize parameter.
sliderInput('sampleSize', 'Sample Size', min=1, max=nrow(dataset),
value=min(1000, nrow(dataset)), step=500, round=0),
# The three lines of code below provide the user with dropdown menus
# through which to specify the x, y, and color arguments for ggplot
# Basic syntax:
# selectInput(inputID, label, choices) creates a dropdown menu
# titled 'label' asking the user to select one of the 'choices'.
# The user's selection is stored in a variable called input$inputID.
selectInput('x', 'X', names(dataset)),
selectInput('y', 'Y', names(dataset), names(dataset)[[2]]),
selectInput('color', 'Color', c('None', names(dataset))),
# The next two lines provide the user with a checkbox to specify
# logical values. In this case, the user can specify whether to use jitter
# and whether to overlay a smoother.
checkboxInput('jitter', 'Jitter'),
checkboxInput('smooth', 'Smooth'),
# The next two lines provide the user with dropdown menus from which
# To select faceting parameters. Note that 'None' is provided
# as one of the choices.
selectInput('facet_row', 'Facet Row', c(None='.', names(dataset))),
selectInput('facet_col', 'Facet Column', c(None='.', names(dataset)))
),
# The server-side code constructs a renderPlot() specification
# that gets saved as output$plot.
# plotOutput(outputId) renders the renderPlot() given by output$outputId
mainPanel(
plotOutput('plot')
)
))
7701a2c3b7a5052b468a4152d7ef733a165094a1 | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /mlr3proba/man/mlr_pipeops_trafotask_survregr.Rd | 124915a14c2b050d2f6e9e07d871e13d219b3ecf | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | true | 8,446 | rd | mlr_pipeops_trafotask_survregr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PipeOpTaskSurvRegr.R
\name{mlr_pipeops_trafotask_survregr}
\alias{mlr_pipeops_trafotask_survregr}
\alias{PipeOpTaskSurvRegr}
\title{PipeOpTaskSurvRegr}
\description{
Transform \link{TaskSurv} to \link[mlr3:TaskRegr]{TaskRegr}.
}
\section{Input and Output Channels}{
Input and output channels are inherited from \link{PipeOpTaskTransformer}.
The output is the input \link{TaskSurv} transformed to a \link[mlr3:TaskRegr]{TaskRegr}.
}
\section{State}{
The \verb{$state} is a named \code{list} with the \verb{$state} elements
\itemize{
\item \code{instatus}: Censoring status from input training task.
\item \code{outstatus} : Censoring status from input prediction task.
}
}
\section{Parameters}{
The parameters are
\itemize{
\item \verb{method::character(1))}\cr
Method to use for dealing with censoring. Options are \code{"ipcw"} (Vock et al., 2016): censoring
is column is removed and a \code{weights} column is added, weights are inverse estimated survival
probability of the censoring distribution evaluated at survival time;
\code{"mrl"} (Klein and Moeschberger, 2003): survival time of censored
observations is transformed to the observed time plus the mean residual life-time at the moment
of censoring; \code{"bj"} (Buckley and James, 1979): Buckley-James imputation assuming an AFT
model form, calls \link[bujar:bujar]{bujar::bujar}; \code{"delete"}: censored observations are deleted from the
data-set - should be used with caution if censoring is informative; \code{"omit"}: the censoring
status column is deleted - again should be used with caution; \code{"reorder"}: selects features and
targets and sets the target in the new task object. Note that \code{"mrl"} and \code{"ipcw"} will perform
worse with Type I censoring.
\item \verb{estimator::(character(1))}\cr
Method for calculating censoring weights or mean residual lifetime in \code{"mrl"},
current options are: \code{"kaplan"}: unconditional Kaplan-Meier estimator;
\code{"akritas"}: conditional non-parametric nearest-neighbours estimator;
\code{"cox"}.
\item \verb{alpha::(numeric(1))}\cr
When \code{ipcw} is used, optional hyper-parameter that adds an extra penalty to the weighting for
censored observations. If set to \code{0} then censored observations are given zero weight and
deleted, weighting only the non-censored observations. A weight for an observation is then
\eqn{(\delta + \alpha(1-\delta))/G(t)} where \eqn{\delta} is the censoring indicator.
\item \code{eps::numeric(1)}\cr
Small value to replace \code{0} survival probabilities with in IPCW to prevent infinite weights.
\item \verb{lambda::(numeric(1))}\cr
Nearest neighbours parameter for \code{mlr3extralearners::akritas} estimator, default \code{0.5}.
\item \verb{features, target :: character())}\cr
For \code{"reorder"} method, specify which columns become features and targets.
\item \verb{learner, center, mimpu, iter.bj, max.cycle, mstop, nu}\cr
Passed to \link[bujar:bujar]{bujar::bujar}.
}
}
\examples{
\dontrun{
if (requireNamespace("mlr3pipelines", quietly = TRUE)) {
library(mlr3)
library(mlr3pipelines)
# these methods are generally only successful if censoring is not too high
# create survival task by undersampling
task = tsk("rats")$filter(
c(which(tsk("rats")$truth()[,2]==1),
sample(which(tsk("rats")$truth()[,2]==0), 42))
)
# deletion
po = po("trafotask_survregr", method = "delete")
po$train(list(task, NULL))[[1]] # 42 deleted
# omission
po = po("trafotask_survregr", method = "omit")
po$train(list(task, NULL))[[1]]
if (requireNamespace("mlr3extralearners", quietly = TRUE)) {
# ipcw with Akritas
po = po("trafotask_survregr", method = "ipcw", estimator = "akritas", lambda = 0.4, alpha = 0)
new_task = po$train(list(task, NULL))[[1]]
print(new_task)
new_task$weights
}
# mrl with Kaplan-Meier
po = po("trafotask_survregr", method = "mrl")
new_task = po$train(list(task, NULL))[[1]]
data.frame(new = new_task$truth(), old = task$truth())
# Buckley-James imputation
if (requireNamespace("bujar", quietly = TRUE)) {
po = po("trafotask_survregr", method = "bj")
new_task = po$train(list(task, NULL))[[1]]
data.frame(new = new_task$truth(), old = task$truth())
}
# reorder - in practice this will be only be used in a few graphs
po = po("trafotask_survregr", method = "reorder", features = c("sex", "rx", "time", "status"),
target = "litter")
new_task = po$train(list(task, NULL))[[1]]
print(new_task)
# reorder using another task for feature names
po = po("trafotask_survregr", method = "reorder", target = "litter")
new_task = po$train(list(task, task))[[1]]
print(new_task)
}
}
}
\references{
Buckley, Jonathan, James, Ian (1979).
\dQuote{Linear Regression with Censored Data.}
\emph{Biometrika}, \bold{66}(3), 429--436.
\doi{10.2307/2335161}, \url{https://www.jstor.org/stable/2335161}.
Klein, P J, Moeschberger, L M (2003).
\emph{Survival analysis: techniques for censored and truncated data}, 2 edition.
Springer Science & Business Media.
ISBN 0387216456.
Vock, M D, Wolfson, Julian, Bandyopadhyay, Sunayan, Adomavicius, Gediminas, Johnson, E P, Vazquez-Benitez, Gabriela, O'Connor, J P (2016).
\dQuote{Adapting machine learning techniques to censored time-to-event health record data: A general-purpose approach using inverse probability of censoring weighting.}
\emph{Journal of Biomedical Informatics}, \bold{61}, 119--131.
\doi{https://doi.org/10.1016/j.jbi.2016.03.009}, \url{https://www.sciencedirect.com/science/article/pii/S1532046416000496}.
}
\seealso{
Other PipeOps:
\code{\link{PipeOpPredTransformer}},
\code{\link{PipeOpTaskTransformer}},
\code{\link{PipeOpTransformer}},
\code{\link{mlr_pipeops_survavg}},
\code{\link{mlr_pipeops_trafopred_regrsurv}},
\code{\link{mlr_pipeops_trafopred_survregr}},
\code{\link{mlr_pipeops_trafotask_regrsurv}}
Other Transformation PipeOps:
\code{\link{mlr_pipeops_trafopred_regrsurv}},
\code{\link{mlr_pipeops_trafopred_survregr}},
\code{\link{mlr_pipeops_trafotask_regrsurv}}
}
\concept{PipeOps}
\concept{Transformation PipeOps}
\section{Super classes}{
\code{\link[mlr3pipelines:PipeOp]{mlr3pipelines::PipeOp}} -> \code{\link[mlr3proba:PipeOpTransformer]{mlr3proba::PipeOpTransformer}} -> \code{\link[mlr3proba:PipeOpTaskTransformer]{mlr3proba::PipeOpTaskTransformer}} -> \code{PipeOpTaskSurvRegr}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{PipeOpTaskSurvRegr$new()}}
\item \href{#method-clone}{\code{PipeOpTaskSurvRegr$clone()}}
}
}
\if{html}{
\out{<details open ><summary>Inherited methods</summary>}
\itemize{
\item \out{<span class="pkg-link" data-pkg="mlr3pipelines" data-topic="PipeOp" data-id="predict">}\href{../../mlr3pipelines/html/PipeOp.html#method-predict}{\code{mlr3pipelines::PipeOp$predict()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3pipelines" data-topic="PipeOp" data-id="print">}\href{../../mlr3pipelines/html/PipeOp.html#method-print}{\code{mlr3pipelines::PipeOp$print()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3pipelines" data-topic="PipeOp" data-id="train">}\href{../../mlr3pipelines/html/PipeOp.html#method-train}{\code{mlr3pipelines::PipeOp$train()}}\out{</span>}
}
\out{</details>}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
Creates a new instance of this \link[R6:R6Class]{R6} class.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PipeOpTaskSurvRegr$new(id = "trafotask_survregr", param_vals = list())}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{id}}{(\code{character(1)})\cr
Identifier of the resulting object.}
\item{\code{param_vals}}{(\code{list()})\cr
List of hyperparameter settings, overwriting the hyperparameter settings that would
otherwise be set during construction.}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{PipeOpTaskSurvRegr$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
54b5bd4e3fff9f8c5f62c45d6fd3cdfdb41a7375 | 287ba6b30c4c8bff1e28160ab6168f703d3d3ee9 | /plot4.R | c711ab31125b3e62bb2cb813e839215a6162894a | [] | no_license | tmawyin/ExData_Plotting1 | a7d0a8d56908152658962a10988e96224faf54af | 056cf0a6f058bf4ab025c80e330d1410449190f4 | refs/heads/master | 2020-12-25T13:23:51.548497 | 2015-08-05T02:49:07 | 2015-08-05T02:49:07 | 40,220,872 | 0 | 0 | null | 2015-08-05T02:45:47 | 2015-08-05T02:45:47 | null | UTF-8 | R | false | false | 1,358 | r | plot4.R | ## Exploratory Data Analysis
## Project 1 - Figure 4
## by Tomas Mawyin
## This generates 4 subplots to show different results
## Loading the file
## (load.file.R is a project helper; it is assumed to return the two-day
## household power consumption subset with a POSIXct Time column -- confirm)
source("load.file.R")
df <- load.file()
## Setting up the subplot, we will keep the default margins
## mfcol fills plots column-wise: plot order is (1,1), (2,1), (1,2), (2,2)
par(mfcol = c(2,2))#, mar = c(5,4.1,2,4))
## Let's generate the first plot:
with(df, plot(Time, Global_active_power, type = "l",
ylab = "Global Active Power (kilowatts)",
main = NA, xlab = NA))
## Generate the next plot on the second row, first column
with(df, {plot(Time, Sub_metering_1, type = "l", col = "black",
ylab = "Energy sub metering")
lines(Time, Sub_metering_2, type = "l", col = "orange", ylab = NA)
lines(Time, Sub_metering_3, type = "l", col = "blue", ylab = NA)})
## Let's add the legend
## bty = "n" suppresses the legend box; cex shrinks it to fit the panel
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty = c(1,1,1), lwd = c(1,1,1), col = c("black","orange","blue"),
bty = "n", cex=0.6)
## Adding the 3rd plot
with(df, plot(Time, Voltage, type = "l",
ylab = "Voltage",
main = NA, xlab = "datetime"))
## The final plot
with(df, plot(Time, Global_reactive_power, type = "l",
main = NA, xlab = "datetime"))
## Let's save the png file
## dev.copy re-renders the current screen device into a PNG file device,
## which is then closed by dev.off()
dev.copy(png, filename = "plot4.png", width=480, height=480, units="px")
dev.off()
|
9a31e99a6fc1c56fe15af7d7b4fd942add5595e7 | db50861ffa9476c537645c671ce1f401eb9f3aed | /plot1.R | f974e37ca01a25244440f81b6773722dce220af6 | [] | no_license | andyras/ExData_Plotting1 | f2c923c04f2f4a48ba9bc2652db4ab42ff95ce06 | 5c81bc983ef5887b6c06fd9c8d29d90870257a08 | refs/heads/master | 2020-04-29T14:16:42.381166 | 2016-01-10T18:56:16 | 2016-01-10T18:56:16 | 49,277,583 | 0 | 0 | null | 2016-01-08T14:49:32 | 2016-01-08T14:49:31 | null | UTF-8 | R | false | false | 786 | r | plot1.R | require(sqldf)
# Download and unpack the raw data once; skipped when the archive already exists.
if (!file.exists("household_power_consumption.zip")) {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "household_power_consumption.zip", method = "curl")
  unzip("household_power_consumption.zip")
}
# Load only the two required days via SQL filtering (avoids reading the
# full ~2M-row file); cached in elData across interactive runs.
if (!exists("elData")) {
  elData <- read.csv.sql("household_power_consumption.txt", sep = ";", sql = 'select * from file where Date="1/2/2007" or Date="2/2/2007"')
  # Combine Date and Time into one POSIXlt timestamp.
  # Fix: the dataset stores dates as day/month/year ("1/2/2007" is
  # 1 Feb 2007), so the format must be %d/%m/%Y; the original %m/%d/%Y
  # silently produced wrong dates.
  elData$Date <- strptime(paste(elData$Date, elData$Time), "%d/%m/%Y %H:%M:%S")
  elData$Time <- NULL # time-of-day is now folded into Date
}
# plot 1: histogram of global active power, written to plot1.png
png(file = "plot1.png", width = 480, height = 480)
hist(elData$Global_active_power, col = "red", xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
dev.off()
b9cd233a3d61d4504603df77642b04b912d052f4 | 6afd9380dadfa5a428a8f93d44c303c147a46865 | /rplots/demo_pheatmap.R | 6dead8210787f19a3b4185f5474773f451fc7d45 | [] | no_license | 1156054203/scripts | 403535cf3e8cef3e29cac2ae1103029bde3108bf | 63541fc1b4819e792b0b8538cd3d4eb2fb39f087 | refs/heads/master | 2021-12-23T19:20:43.038437 | 2021-11-01T03:47:35 | 2021-11-01T03:47:35 | 148,399,604 | 2 | 3 | null | null | null | null | UTF-8 | R | false | false | 1,497 | r | demo_pheatmap.R | library(pheatmap)
# Demo: annotated pheatmap from a tab-separated expression table.
#library(pheatmap)
#pdf("pheatmap.pdf",width=6,height=15)
#png("pheatmap.png",width=480,height=480)
#pdf("pheatmap.pdf",width=6,height=15)
p <- read.table("test.xls",header=T,sep="\t")
rownames(p)=p$EnsemblGene_GeneSymbol
# NOTE(review): head(p,1) is the *first data row*, not the header row;
# using it as column names looks wrong (header=T already set the names)
# -- confirm the intent before relying on this line.
colnames(p)=head(p,1)
# Drop the gene-symbol column now that it lives in the row names
p <- p[,-1]
p_matrix <- as.matrix(p)
# log10(x + 1) keeps zero counts finite
p_matrix <- log10(p_matrix + 1)
annotation_col=data.frame(Sample=factor(c("S1","S2","S3","S4","S5","S6")),time=1:6)
rownames(annotation_col)=colnames(p)
# Row annotation: first 9 genes -> Path1, next 2 -> Path2, last 2 -> Path3
annotation_row=data.frame(GeneClass=factor(rep(c("Path1","Path2","Path3"),c(9,2,2))))
rownames(annotation_row)=rownames(p)
# NOTE(review): ann_colors uses "Time" but annotation_col's column is
# named "time" (lower case) -- pheatmap matches annotation colors by
# name, so this entry may be ignored; confirm.
ann_colors=list(Time=c("white","firebrick"),Sample=c(S1="#1B9E77",S2="#D95F02",S3="red",S4="blue",
S5="green",S6="yellow"),GeneClass=c(Path1="#7570B3",Path2 ="#E7298A",Path3 ="#66A61E"))
# NOTE(review): filename is an absolute Windows path; this only works on
# the original author's machine -- use a relative path elsewhere.
pheatmap(p_matrix,color=colorRampPalette(c("blue","white","red")) (100) ,
scale="column",border_color="grey60",show_rownames=T,show_colnames=T,
clustering_distance_rows="euclidean",clustering_distance_cols="euclidean",clustering_method="complete",
fontsize_col=6,fontsize_row=6,fontsize_number=8,fontsize=9,main="Test pheatmap",legend=T,
display_numbers=T,number_color="black",annotation_colors=ann_colors,cellwidth=40,
cellheight=15,annotation_col=annotation_col,annotation_row=annotation_row,
annotation_legend=T,cutree_rows=3,cutree_cols=2,filename="C:/Users/chen_yulong/Desktop/R/Example_pheatmap.png")
#dev.off()
|
dbdfaac842c2f2c674566aa9c007a8648df25a4d | 6da672676b4e7023db0c00978bce2588c2b18beb | /R/prefixes.R | 46925b9862d42c48dc3f4700ca88885956799eca | [] | no_license | cran/sitools | 707209bdb38c1a8545af7ac5358e1524da99efe5 | e84ffa3518b8c2befac9b2e87c2d5d00f15e3b34 | refs/heads/master | 2020-05-16T21:51:18.559142 | 2012-08-21T00:00:00 | 2012-08-21T00:00:00 | 17,699,692 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 268 | r | prefixes.R | yotta <- 1e24
# SI (metric) prefix multipliers, largest to smallest
# (yotta, 1e24, is defined on the preceding line).
zetta <- 1e21
exa <- 1e18
peta <- 1e15
tera <- 1e12
giga <- 1e9
mega <- 1e6
kilo <- 1e3
hecto <- 1e2
deca <- 1e1
# Fractional (sub-unit) prefixes:
deci <- 1e-1
centi <- 1e-2
milli <- 1e-3
micro <- 1e-6
nano <- 1e-9
pico <- 1e-12
femto <- 1e-15
atto <- 1e-18
zepto <- 1e-21
yocto <- 1e-24
|
ab2521aeefae0dadb8edf13c0fc69809290f2026 | 01a4035babde87ed8aa9dea267d49cbc98072d3f | /tests/testthat/test-deferred-then.R | f6058e75ca5e032fdcd83e60e0574a53240006b9 | [
"MIT"
] | permissive | behnido/async | 852770ccc9f609f5b2249b94decf878ff4e5a151 | 73309161e353d917a34b51fa5484d23f1a4ab565 | refs/heads/master | 2021-08-18T18:51:35.647183 | 2017-11-23T14:57:27 | 2017-11-23T14:57:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,329 | r | test-deferred-then.R |
context("deferred then")
test_that("HTTP HEAD & synchronous then", {
skip_if_offline()
do <- async(function() {
result <- await(
dx <- http_head("https://eu.httpbin.org")$
then(function(value) value$status_code)
)
expect_equal(result, 200)
expect_equal(dx$get_value(), 200)
expect_equal(await(dx), 200)
})
synchronise(do())
})
test_that("HTTP HEAD & async then", {
skip_if_offline()
do <- async(function() {
result <- await(
dx <- http_head("https://eu.httpbin.org")$
then(function(value) http_get(value$url))
)
expect_equal(result$status_code, 200)
expect_equal(dx$get_value()$status_code, 200)
expect_equal(await(dx)$status_code, 200)
})
synchronise(do())
})
test_that("HTTP HEAD & async then & sync then", {
skip_if_offline()
do <- async(function() {
result <- await(
dx <- http_head("https://eu.httpbin.org") $
then(function(value) http_get(value$url)) $
then(function(value) value$status_code)
)
expect_equal(result, 200)
expect_equal(dx$get_value(), 200)
expect_equal(await(dx), 200)
})
synchronise(do())
})
test_that("then for fulfilled", {
skip_if_offline()
do <- async(function() {
await(dx <- http_head("https://eu.httpbin.org/status/404"))
result <- await(dx$then(function(value) value$status_code))
expect_equal(result, 404)
})
synchronise(do())
})
test_that("multiple then clauses", {
skip_if_offline()
do <- async(function() {
dx <- http_head("https://eu.httpbin.org/status/404")
dx2 <- dx$then(function(value) http_get(value$url))
dx3 <- dx$then(function(value) value$status_code)
dx4 <- dx$then(function(value) http_head(value$url))
result <- await_all(dx2, dx3, dx4)
expect_equal(result[[1]]$status_code, 404)
expect_equal(result[[2]], 404)
expect_equal(result[[3]]$url, dx$get_value()$url)
expect_equal(dx2$get_value()$status_code, 404)
expect_equal(dx3$get_value(), 404)
expect_equal(dx4$get_value()$url, dx$get_value()$url)
})
synchronise(do())
})
test_that("compact function notation", {
skip_if_offline()
do <- async(function() {
result <- await(
dx <- http_head("https://eu.httpbin.org") $
then(~ http_get(.$url)) $
then(~ .$status_code)
)
expect_equal(result, 200)
expect_equal(dx$get_value(), 200)
expect_equal(await(dx), 200)
})
synchronise(do())
})
test_that("embedded then", {
add1 <- function(n) { n ; delay(10/1000)$then(function(value) n + 1) }
mul3 <- function(n) { n ; delay(10/1000)$then(function(value) n * 3) }
do <- async(function() {
result <- await(add1(4)$then(mul3))
expect_equal(result, 15)
})
synchronise(do())
})
test_that("more embedded thens", {
do <- async(function() {
steps <- numeric()
dx <- async(function() steps <<- c(steps, 1))()$
then(function() {
async_constant()$
then(function() steps <<- c(steps, 2))$
then(function() steps <<- c(steps, 3))
})$
then(function() {
async_constant()$
then(function() steps <<- c(steps, 4))$
then(function() steps <<- c(steps, 5))
})$
then(function() steps <<- c(steps, 6))
await(dx)
expect_equal(steps, 1:6)
})
synchronise(do())
})
|
6cfbf5d4fd204931f4612e643104de8cdf14017a | 2975fba6bf359214c55e7d936f896a5a4be3d8f5 | /R/sim.synth.R | c5560ce989b06e6c4a946b066fa501bcda4d9b12 | [] | no_license | tagteam/riskRegression | 6bf6166f098bbdc25135f77de60122e75e54e103 | fde7de8ca8d4224d3a92dffeccf590a786b16941 | refs/heads/master | 2023-08-08T03:11:29.465567 | 2023-07-26T12:58:04 | 2023-07-26T12:58:04 | 36,596,081 | 38 | 14 | null | 2023-05-17T13:36:27 | 2015-05-31T09:22:16 | R | UTF-8 | R | false | false | 1,830 | r | sim.synth.R | ### sim.synth.R ---
#----------------------------------------------------------------------
## Author: Thomas Alexander Gerds
## Created: Jul 7 2022 (13:33)
## Version:
## Last-Updated: Jul 7 2022 (14:26)
## By: Thomas Alexander Gerds
## Update #: 9
#----------------------------------------------------------------------
##
### Commentary:
##
### Change Log:
#----------------------------------------------------------------------
##
### Code:
#' @title Simulating from a synthesized object
#'
#' @description Simulating from a synthesized object
#' @param object generated with \code{synthesize}
#' @param n sample size
#' @param drop.latent if \code{TRUE} remove the latent event times from the resulting data set.
#' @param ... additional arguments passed on to \code{lava::sim}
#' @export simsynth
#' @examples
#' library(survival)
#' m=synthesize(Surv(time,status)~sex+age+bili,data=pbc)
#' simsynth(m,10,drop.latent=TRUE)
#'
simsynth <- function(object, n = 200, drop.latent = FALSE, ...){
    # Simulate n observations from the lava model stored in a synthesize() object.
    lava.object <- object$lava.object
    res <- lava::sim(lava.object, n, ...)
    # Re-attach the factor level labels recorded at synthesis time
    labels <- object$labels
    for (var in names(labels)){
        res[[var]] <- factor(res[[var]])
        levels(res[[var]]) <- labels[[var]]
    }
    # remove variables that would not be in the original data set
    if (drop.latent){
        # remove latent event times; keep only indices actually present so
        # that a stale name cannot produce a zero-column subset
        latent <- lava.object$attributes$eventHistory$time$latentTimes
        drop.idx <- match(latent, names(res), nomatch = 0L)
        drop.idx <- drop.idx[drop.idx > 0L]
        if (length(drop.idx) > 0)
            res <- res[, -drop.idx, drop = FALSE]
        # remove dummy variables: for each category keep only the first
        # matching column. Fix: the original `res[,-grep(c,names(res))[-1]]`
        # dropped *every* column when a category matched exactly one dummy,
        # because -integer(0) selects zero columns; it also used `c` as the
        # loop variable, masking base::c.
        categories <- object$categories
        for (category in categories) {
            dummy.idx <- grep(category, names(res))
            if (length(dummy.idx) > 1)
                res <- res[, -dummy.idx[-1], drop = FALSE]
        }
    }
    res
}
######################################################################
### sim.synth.R ends here
|
f5d572410e671afcd151fc73fd7d8237a5847dc2 | bddc51485e55158c0d84ba8bd6c4f0a86de115d4 | /R/func_process_aircraft2.R | 36126dbc51796d9dff8327d7b627aa9b44181465 | [] | no_license | willdrysdale/wsdmiscr | 4b74ea6d6e0999f36013d8edef55125568b94586 | 60d548c5816e18b03abd6b26ad9c6e0892e797e1 | refs/heads/master | 2023-08-27T08:13:43.162945 | 2023-08-08T12:30:56 | 2023-08-08T12:30:56 | 86,581,767 | 0 | 2 | null | 2017-05-25T10:32:06 | 2017-03-29T12:52:59 | R | UTF-8 | R | false | false | 1,253 | r | func_process_aircraft2.R | #' Process Aircraft Data2
#'
#' Reads in an AQD NOx aircraft file and runs aircraft_cal_flags, producing quicklook and full outputs
#' Used on Files after flight C019
#'
#'
#' @param fn file to read
#' @param dir_out output directory
#' @param flight_id ID of flight
#'
#' @return Saves two csvs into the output directory "processed" full output and "quicklook" only containing
#' time and concentrations. Returns a list containing these two dataframes
#'
#' @author Will S. Drysdale
#' @author Freya A. Squires
#'
#' @export
process.aircraft2 = function(fn,dir_out = "",flight_id = "XXXX"){
  # Read the raw AQD NOx file and apply calibration flags
  d = read.aircraft(fn)
  d_processed = aircraft_cal_flags(d)
  # Quicklook keeps the time stamp (column 1) and the concentration
  # columns (65-67 of the processed output).
  # NOTE(review): these indices assume a fixed layout from
  # aircraft_cal_flags() -- confirm if that function changes.
  d_quick = d_processed[,c(1,65,66,67)]
  d_final = aircraft_clean_negatives(d_quick)
  # Write the three CSV outputs into dir_out, named by flight_id.
  # paste0() replaces paste(..., sep = ""); FALSE replaces the unsafe
  # abbreviation F (F is an ordinary, reassignable variable in R).
  write.csv(d_processed, paste0(dir_out, flight_id, "_processed.csv"), row.names = FALSE, na = "")
  write.csv(d_quick, paste0(dir_out, flight_id, "_quick.csv"), row.names = FALSE, na = "")
  write.csv(d_final, paste0(dir_out, flight_id, "_final.csv"), row.names = FALSE, na = "")
  # Return the three data frames, named "<flight_id>_processed" etc.
  out = list(d_processed, d_quick, d_final)
  names(out) = paste0(flight_id, c("_processed", "_quick", "_final"))
  out
}
a110546fd7c593ddbb0577a3805713c0736b5c84 | b9841a1faf323c23306c4d75dbbc2b82ae48c4e8 | /Localize.r | 290bce1b0c54372d8b2e05fa847bb61d81d63c79 | [] | no_license | steventroughtonsmith/SimpleText | 4f2401b4d4960768efbd3d4fc1ccd50bf6adf88b | d389f4679bdd731c80541e1dbf4edcc9a0c0f06f | refs/heads/master | 2020-06-04T12:32:33.392136 | 2015-01-14T00:56:49 | 2015-01-14T00:56:49 | 29,219,728 | 13 | 0 | null | null | null | null | MacCentralEurope | R | false | false | 26,463 | r | Localize.r | /*
File: Localize.r
Contains: all strings for SimpleText that need to be localized.
Nothing in a .r file outside of this file should be language specific.
Disclaimer: IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc.
("Apple") in consideration of your agreement to the following terms, and your
use, installation, modification or redistribution of this Apple software
constitutes acceptance of these terms. If you do not agree with these terms,
please do not use, install, modify or redistribute this Apple software.
In consideration of your agreement to abide by the following terms, and subject
to these terms, Apple grants you a personal, non-exclusive license, under Apple's
copyrights in this original Apple software (the "Apple Software"), to use,
reproduce, modify and redistribute the Apple Software, with or without
modifications, in source and/or binary forms; provided that if you redistribute
the Apple Software in its entirety and without modifications, you must retain
this notice and the following text and disclaimers in all such redistributions of
the Apple Software. Neither the name, trademarks, service marks or logos of
Apple Computer, Inc. may be used to endorse or promote products derived from the
Apple Software without specific prior written permission from Apple. Except as
expressly stated in this notice, no other rights or licenses, express or implied,
are granted by Apple herein, including but not limited to any patent rights that
may be infringed by your derivative works or by other works in which the Apple
Software may be incorporated.
The Apple Software is provided by Apple on an "AS IS" basis. APPLE MAKES NO
WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED
WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN
COMBINATION WITH YOUR PRODUCTS.
IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION
OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT, TORT
(INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF APPLE HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Copyright © 1993-2000 Apple Computer, Inc., All Rights Reserved
*/
// --------------------------------------------------------------------------------------------------------------
// Basic application information
#define APPNAME "SimpleText"
// Version number is "MAJOR.MINORHIGHNIBBLE.MINORLONIBBLE"
#define MAJORVERSION 0x01
#define MINORVERSION 0x40
#define STAGE final
#define RELEASE 0
#define SHORTVERSIONSTRING "Carbon Sample Code"
#define COPYRIGHTNOTICE " © Apple Computer, Inc. 1985-2000"
#define COUNTRYVERSION verUS
#define LONGVERSIONSTRING APPNAME " " SHORTVERSIONSTRING ", " COPYRIGHTNOTICE
// open kinds
// Resource strings for SimpleText (Apple Rez source).  APPNAME,
// COPYRIGHTNOTICE and SHORTVERSIONSTRING are #defined earlier in this file.
// Adjacent string literals concatenate, so APPNAME" cannot ..." splices the
// application name into the sentence.  "^0"/"^1"/"^2" are runtime
// substitution placeholders filled in by the application (ParamText-style)
// -- presumably; confirm against the alert/dialog code that uses them.
//
// Document-"kind" strings; the trailing commas on all but the last entry
// suggest these macros expand inside a comma-separated resource list --
// verify against the resource definition that uses them before editing.
#define READONLYDOC "SimpleText read-only document",
#define PICTDOC "SimpleText picture",
#define MOVIEDOC "SimpleText movie",
#define PRINTDOC "SimpleText print document",
#define PDDDOC "portable digital document",
#define STATIONERYDOC "SimpleText stationery file",
#define TEXTDOC "SimpleText text document"
// Window titles
#define FIRSTNEWDOCUMENTTITLE "untitled"
#define NEWDOCUMENTTITLE "untitled ^0"
#define ABOUTBOXTITLE "About " APPNAME "…"
#define CLIPBOARDTITLE "Clipboard"
// Misc strings
// ABOUTSTRING1..6 are the six lines of the About box, top to bottom
// (empty strings are blank spacer lines).
#define ABOUTSTRING1 APPNAME
#define ABOUTSTRING2 ""
#define ABOUTSTRING3 "Worldwide Developer Technical Support"
#define ABOUTSTRING4 ""
#define ABOUTSTRING5 COPYRIGHTNOTICE
#define ABOUTSTRING6 SHORTVERSIONSTRING
#define CLIPBOARDNONESTRING "Clipboard contents: none"
#define CLIPBOARDUNKNOWNSTRING "Clipboard contents: unknown type"
#define CLIPBOARDPICTSTRING "Clipboard contents: picture"
#define CLIPBOARDTEXTSTRING "Clipboard contents: text"
#define CLIPBOARDHIDE "Hide Clipboard"
#define CLIPBOARDSHOW "Show Clipboard"
// Save-dialog strings
#define TEXTSAVEPROMPT "Save this document as:"
#define TEXTSAVEBUTTON "Save"
#define TEXTCANCELBUTTON "Cancel"
#define TEXTEJECTBUTTON "Eject"
#define TEXTDESKTOPBUTTON "Desktop"
#define TEXTSTATIONERY "To save this document as a stationery pad (a template document) click this button before clicking the Save button."
#define TEXTSTATIONERYSELECTED "When this button is selected, the document you are saving will be saved as a stationery pad (a template document)."
#define TEXTDOCUMENT "To save your document as a "APPNAME" document, click this button."
#define TEXTDOCUMENTSELECTED "When this button is selected, your document will be saved as a "APPNAME" document."
// "\$CA" is a Rez hex-byte escape for 0xCA (MacRoman non-breaking space --
// presumably used as an inline picture marker in text; confirm).
#define TEXTPICTMARKER1 "\$CA"
#define TEXTPICTMARKER2 ""
#define HELPMENUCOMMAND "?"
// Save changes alert strings
#define SAVESAVEBUTTON "Save"
#define SAVECANCELBUTTON "Cancel"
#define SAVEDONTSAVEBUTTON "Don’t Save"
// NOTE(review): "Dd" looks like the accepted key equivalents for the
// Don’t Save button (upper/lower case D) -- confirm against the alert code.
#define SAVEBUTTONSHORTCUTS "Dd"
#define SAVESTATICTEXT "Save changes to the document “^0” before closing?"
// Go to page alert strings
#define GOTOOKBUTTON "OK"
#define GOTOCANCELBUTTON "Cancel"
#define GOTOSTATICTEXT "Go to page"
#define GOTOOFSTATICTEXT "of ^0."
#define GOTOPAGESLIDERSTRING "Go to page: "
#define PAGELABELSTRING "^0 of ^1 (^2%)"
// Find/Replace alert strings
#define FINDBUTTON "Find"
#define FINDCANCELBUTTON "Cancel"
#define REPLACEBUTTON "Replace"
#define REPLACEALLBUTTON "Replace All"
#define FINDCASESENSITIVE "Case Sensitive"
#define FINDWRAPAROUND "Wrap–Around Search"
#define FINDSTATIC "Find what?"
#define REPLACESTATIC "Replace with what?"
// Error alerts
#define ERROKBUTTON "OK"
#define ERRCANCELBUTTON "Cancel"
#define ERRSTARTUPVERSION APPNAME " requires System 7 in order to run."
#define ERRSTARTUPFAILED APPNAME " is unable to run because of a serious error."
#define ERRNEWFAILEDRAM APPNAME" cannot make a new document because there is not enough memory."
#define ERROPENFAILED APPNAME" cannot open this document. It may be in use by someone else."
#define ERRSAVEFAILED APPNAME" cannot save this document. This disk may be full or locked, or the file may be locked."
#define ERRMODFAILED "You cannot modify this document; you can only look at it."
#define ERRSETUPFAILED APPNAME" is unable to setup the page for this document. Please use the Chooser to select a printer."
#define ERRPRINTMEM APPNAME" cannot print this document because there is not enough memory."
#define ERRPAGESETUPMEM APPNAME" cannot perform a Page Setup for this document because there is not enough memory."
#define ERRPRINTDISKFULL APPNAME" cannot print this document because there is not enough room available on the hard disk."
#define ERRPRINTTYPE APPNAME" cannot print this kind of document."
#define ERRPRINTFAILED APPNAME" is unable to print this document. Please use the Chooser to select a printer."
#define ERROPENSIZE "This document is too large to be opened by "APPNAME"."
#define ERRPASTESIZE "This document is too large to accept the text from the clipboard."
#define ERRTYPESIZE "This document is too large to accept any more text."
#define ERRSIZECHANGE "Your requested change would cause this document to become too long."
#define ERROPENTYPE APPNAME" cannot display this kind of document."
#define ERRCOPYTOOBIG APPNAME" cannot copy from this document, the selection is too large."
#define ERRSAVETYPE APPNAME" cannot replace a different kind of document."
#define ERROPENOUTOFMEM APPNAME" cannot display this document because there is not enough memory."
#define ERROPENDRAW APPNAME" cannot display this document because an error occurred while drawing."
#define ERROPENPS APPNAME" may not be able to display this document properly because it contains PostScript®."
#define ERROPENNOPAGES APPNAME" cannot display this document because it contains no pages."
#define ERRRECORDFULL APPNAME" cannot record at this moment because there is not enough memory."
#define ERRRECORDERR APPNAME" cannot record at this moment. Another application may be using Sound Input."
#define ERRSPEAKFULL "There is not enough memory to speak. Try increasing " APPNAME "'s partition size."
#define ERRSPEAKERR "An error occurred when " APPNAME " tried to speak."
#define ERRCHANGEVOICE APPNAME " is unable to change to the new voice. Try increasing " APPNAME "'s partition size."
#define ERROPENANOTHERFAILED APPNAME " is unable to open another document at this time. Cancel the current Print or Page Setup dialog and try again."
#define ERRPRINTANOTHERFAILED APPNAME " is unable to print another document at this time. Cancel the current Print or Page Setup dialog and try again."
// Menu titles, menu-item texts, and command-key equivalents.
// Convention visible below: each ...ITEM macro has a matching ...KEY macro
// holding its command-key character; "nokey" (defined elsewhere in this
// file) marks items with no keyboard shortcut.
// Apple menu strings
#define ABOUTITEM "About " APPNAME "…"
// File menu strings
#define FILEMENU "File"
#define FILENEWITEM "New"
#define FILENEWKEY "N"
#define FILEOPENITEM "Open…"
#define FILEOPENKEY "O"
#define FILECLOSEITEM "Close"
#define FILECLOSEKEY "W"
#define FILESAVEITEM "Save"
#define FILESAVEKEY "S"
#define FILESAVEASITEM "Save As…"
#define FILESAVEASKEY nokey
#define FILEPAGESETUPITEM "Page Setup…"
#define FILEPAGESETUPKEY nokey
#define FILEPRINTITEM "Print…"
#define FILEPRINTKEY "P"
#define FILEPRINTONECOPYITEM "Print One Copy"
#define FILEPRINTONECOPYKEY nokey
#define FILEQUITITEM "Quit"
#define FILEQUITKEY "Q"
// Edit menu strings
#define EDITMENU "Edit"
#define EDITUNDOITEM "Undo"
#define EDITUNDOKEY "Z"
#define EDITCUTITEM "Cut"
#define EDITCUTKEY "X"
#define EDITCOPYITEM "Copy"
#define EDITCOPYKEY "C"
#define EDITPASTEITEM "Paste"
#define EDITPASTEKEY "V"
#define EDITCLEARITEM "Clear"
#define EDITCLEARKEY nokey
// SELECTALL/SELECTNONE share one key; the item text toggles with context.
#define EDITSELECTALLITEM "Select All"
#define EDITSELECTNONEITEM "Remove Selection"
#define EDITSELECTALLKEY "A"
#define FINDITEM "Find…"
#define FINDKEY "F"
#define FINDAGAINITEM "Find Again"
#define FINDAGAINKEY "G"
#define FINDSELECTIONITEM "Find Selection"
#define FINDSELECTIONKEY "H"
#define REPLACEITEM "Replace…"
#define REPLACEKEY "R"
#define REPLACEAGAINITEM "Replace Again"
#define REPLACEAGAINKEY "D"
#define EXECUTEITEM "Execute" // for AppleScript
#define EXECUTEKEY "=" // for AppleScript
#define EDITNEXTPAGEITEM "Next Page"
#define EDITNEXTPAGEKEY "+"
#define EDITPREVPAGEITEM "Previous Page"
#define EDITPREVPAGEKEY "-"
#define EDITGOTOPAGEITEM "Go to Page…"
#define EDITGOTOPAGEKEY nokey
// NOTE(review): trailing space inside "Show Clipboard " below -- possibly
// accidental (its counterpart CLIPBOARDSHOW above has none); confirm
// against the menu-toggling code before changing.
#define EDITSHOWCLIPBOARDITEM "Show Clipboard "
#define EDITSHOWCLIPBOARDKEY nokey
// Font menu strings
#define FONTMENU "Font"
// Size menu strings
#define SIZEMENU "Size"
#define SIZE9POINT "9 Point"
#define SIZE9POINTKEY nokey
#define SIZE10POINT "10 Point"
#define SIZE10POINTKEY nokey
#define SIZE12POINT "12 Point"
#define SIZE12POINTKEY nokey
#define SIZE14POINT "14 Point"
#define SIZE14POINTKEY nokey
#define SIZE18POINT "18 Point"
#define SIZE18POINTKEY nokey
#define SIZE24POINT "24 Point"
#define SIZE24POINTKEY nokey
#define SIZE36POINT "36 Point"
#define SIZE36POINTKEY nokey
// Style menu strings
#define STYLEMENU "Style"
#define STYLEPLAIN "Plain Text"
#define STYLEPLAINKEY "T"
#define STYLEBOLD "Bold"
#define STYLEBOLDKEY "B"
#define STYLEITALIC "Italic"
#define STYLEITALICKEY "I"
#define STYLEUNDERLINE "Underline"
#define STYLEUNDERLINEKEY "U"
#define STYLEOUTLINE "Outline"
#define STYLEOUTLINEKEY nokey
#define STYLESHADOW "Shadow"
#define STYLESHADOWKEY nokey
#define STYLECONDENSED "Condensed"
#define STYLECONDENSEDKEY nokey
#define STYLEEXTENDED "Extended"
#define STYLEEXTENDEDKEY nokey
// Sound menu strings
#define SOUNDMENU "Sound"
#define SOUNDRECORDITEM "Record…"
#define SOUNDRECORDKEY nokey
#define SOUNDPLAYITEM "Play"
#define SOUNDPLAYKEY nokey
#define SOUNDERASEITEM "Erase"
#define SOUNDERASEKEY nokey
// SPEAKSELECTION/SPEAKALL share one key; the item text toggles with context.
#define SOUNDSPEAKSELECTIONITEM "Speak Selection"
#define SOUNDSPEAKALLITEM "Speak All"
#define SOUNDSPEAKKEY "J"
#define SOUNDSTOPSPEAKINGITEM "Stop Speaking"
#define SOUNDSTOPSPEAKINGKEY "."
#define SOUNDVOICESITEM "Voices"
#define SOUNDVOICESKEY nokey
#define NOVOICESITEM "None"
#define NOVOICESKEY nokey
// items in the GX pop up menu
#define GXPOPUPMENU "Display Options"
#define GXFIFTY "50%"
#define GXONEHUNDRED "100%"
#define GXONEHUNDREDTWELVE "112%"
#define GXONEHUNDREDFIFTY "150%"
#define GXTWOHUNDRED "200%"
#define GXFOURHUNDRED "400%"
#define GXSCALETOFIT "Scale image to fit window"
#define GXDONTSHOWMARGINS "Don’t show margins"
// Balloon Help Strings
// Naming convention visible in the texts below:
//   ...E = balloon text for the Enabled item,
//   ...D = balloon text for the Dimmed/disabled item (appends a
//          "Not available/Disabled because ..." reason),
//   ...C = balloon text for the currently-Checked/selected item.
#define FINDERHELPSTRING APPNAME "\n\n" "This is a simple application program that you can use to view Read Me files, text files, movies, 3D files, and certain graphics files, and to write letters and memos."
// Apple menu help
#define HELPABOUTITEM "Displays information about the version of the "APPNAME" application."
// File menu help
#define HELPFILEMENU "File menu\n\nUse this menu to open, close, save, and print "APPNAME" documents, and to quit "APPNAME"."
#define HELPNEWE "Opens a new "APPNAME" document called “untitled.”"
#define HELPNEWD "Opens a new "APPNAME" document called “untitled.” Not available because there is a dialog box on the screen."
#define HELPOPENE "Displays a dialog box that allows you to select an existing "APPNAME" document to open."
#define HELPOPEND "Displays a dialog box that allows you to select an existing "APPNAME" document to open. Not available because there is a dialog box on the screen."
#define HELPCLOSEE "Closes the open "APPNAME" document or window."
#define HELPCLOSED "Closes the open "APPNAME" document or window. Not available because no "APPNAME" document is open."
#define HELPSAVEE "Saves the open "APPNAME" document. If you have not saved the document before, a dialog box appears in which you assign a name to the document and indicate where to save it."
#define HELPSAVED "Saves the open "APPNAME" document. Not available because no "APPNAME" document is open, because no changes have been made to the open document, or because the open document is a special type that cannot be changed or saved."
#define HELPSAVEASE "Displays a dialog box in which you can assign a name to the document and indicate where to save it."
#define HELPSAVEASD "Displays a dialog box in which you can name and save the open document. Not available because no "APPNAME" document is open, because no changes have been made to the open document, or because the open document is a special type that cannot be changed."
#define HELPPAGESETUPE "Displays a dialog box in which you can select paper size, orientation, and other printing options."
#define HELPPAGESETUPD "Displays a dialog box in which you can select paper size, orientation, and other printing options. Not available because the current window cannot be printed."
#define HELPPRINTE "Displays a dialog box in which you can specify the number of copies you want to print and other printing options."
#define HELPPRINTD "Displays a dialog box in which you can specify the number of copies you want to print and other printing options. Not available because no "APPNAME" document is open or because the open "APPNAME" document is empty."
#define HELPPRINTONEE "Prints a single copy of the "APPNAME" document."
#define HELPPRINTONED "Prints a single copy of the "APPNAME" document. Not available because no "APPNAME" document is open or because the open "APPNAME" document is empty."
#define HELPQUITE "Quits the "APPNAME" application. If you have not saved changes you made to the open document, you will be asked whether you want to save changes."
#define HELPQUITD "Quits the "APPNAME" application. Not available because there is a dialog box on the screen."
// Edit menu help
#define HELPEDITMENU "Edit menu\n\nUse this menu to undo your last action, to manipulate text or graphics, to select the entire contents of a document, and to show what’s on the Clipboard."
#define HELPUNDOE "Undoes your last action. In "APPNAME", you can use this command to replace material you have cut or cleared or to remove material you have pasted or typed."
#define HELPUNDOD "Undoes your last action if it involved cutting, clearing, pasting, or typing. Not available now because your last action cannot be undone."
#define HELPCUTE "Removes the selected text or graphics and places it temporarily into a storage area called the Clipboard."
#define HELPCUTD "Removes the selected text or graphics and places it temporarily into a storage area called the Clipboard. Not available now because nothing is selected."
#define HELPCOPYE "Copies the selected text or graphics. The original selection remains where it is. The copy is placed temporarily into a storage area called the Clipboard."
#define HELPCOPYD "Copies the selected text or graphics. The original selection remains where it is. The copy is placed temporarily into a storage area called the Clipboard. The command is not available now because nothing is selected."
#define HELPPASTEE "Inserts the contents of the Clipboard at the location of the insertion point."
#define HELPPASTED "Inserts the contents of the Clipboard at the location of the insertion point. Not available because there is nothing on the Clipboard or because the contents of the Clipboard are of a type "APPNAME" can’t read."
#define HELPCLEARE "Removes the selected text or graphics without storing it on the Clipboard."
#define HELPCLEARD "Removes the selected text or graphics without storing it on the Clipboard. Not available now because nothing is selected."
#define HELPSELECTALLE "Selects or deselects (for pictures) the entire contents of the open "APPNAME" document."
#define HELPSELECTALLD "Selects or deselects (for pictures) the entire contents of the open "APPNAME" document. Not available now because no "APPNAME" document is open or because the open document is empty."
#define HELPNEXTPAGEE "Displays the next page in the document."
#define HELPNEXTPAGED "Displays the next page in the document. Disabled because this document contains no additional pages."
#define HELPPREVPAGEE "Displays the previous page in the document."
#define HELPPREVPAGED "Displays the previous page in the document. Disabled because this document contains no additional pages."
#define HELPGOTOPAGEE "Displays the specified page in the document."
#define HELPGOTOPAGED "Displays the specified page in the document. Disabled because this type of document contains no additional pages, or does not support this command."
#define HELPSHOWCLIPBOARDE "Displays the contents of the Clipboard (a storage area for the last item cut or copied)."
#define HELPHIDECLIPBOARDE "Closes the Clipboard window."
#define HELPFINDE "Allows you to search the current window for particular text."
#define HELPFINDD "Allows you to search the current window for particular text. Disabled because the current window is not searchable."
#define HELPFINDAGAINE "Searches the current window for another occurrence of the found text."
#define HELPFINDAGAIND "Searches the current window for another occurrence of the found text. Disabled because the current window is not searchable, or because you have not entered text to find."
#define HELPFINDSELECTIONE "Allows you to search the current window for another occurrence of the selected text."
#define HELPFINDSELECTIOND "Allows you to search the current window for another occurrence of the selected text. Disabled because the current window is not searchable, or because no text is currently selected."
#define HELPREPLACEE "Allows you to search the current window for particular text and replace it."
#define HELPREPLACED "Allows you to search the current window for particular text and replace it. Disabled because the current window is not searchable."
#define HELPREPLACEAGAINE "Searches the current window for another occurrence of the found text and replaces it."
#define HELPREPLACEAGAIND "Searches the current window for another occurrence of the found text and replaces it. Disabled because the current window is not searchable, or because you have not entered text to replace."
#define HELPEXECUTEE "Executes text as an AppleScript script."
// NOTE(review): macro name below breaks the ...E/...D convention (expected
// HELPEXECUTED); renaming would require updating every reference elsewhere
// in the project, so it is only flagged here.
#define HELEXECUTEND "Executes text as an AppleScript script. Disabled because no text is selected."
// Font menu help
#define HELPFONTMENUE "Font menu\n\nUse this menu to change the font used for text in a document."
#define HELPFONTMENUD "Font menu\n\nUse this menu to change the font used for text in a document. Not available because the active document is not a text document."
#define HELPFONTITEME "To use this font in the active document, choose it from this menu."
#define HELPFONTITEMD "To use this font in the active document, choose it from this menu. Not available because the active document is not a text document."
#define HELPFONTITEMC "This is the currently selected font. To change to another font, choose one from this menu."
// Style menu help
#define HELPSTYLEMENUE "Style menu\n\nUse this menu to change the style used for text in a document."
#define HELPSTYLEMENUD "Style menu\n\nUse this menu to change the style used for text in a document. Not available because the active document is not a text document."
#define HELPSTYLEITEME "To use this style in the active document, choose it from this menu."
#define HELPSTYLEITEMD "To use this style in the active document, choose it from this menu. Not available because the active document is not a text document."
#define HELPSTYLEITEMC "This is the currently selected style. To change to another style, choose one from this menu."
// Size menu help
#define HELPSIZEMENUE "Size menu\n\nUse this menu to change the font size used for text in a document."
#define HELPSIZEMENUD "Size menu\n\nUse this menu to change the font size used for text in a document. Not available because the active document is not a text document."
#define HELPSIZEITEME "To use this font size in the active document, choose it from this menu."
#define HELPSIZEITEMD "To use this font size in the active document, choose it from this menu. Not available because the active document is not a text document."
#define HELPSIZEITEMC "This is the currently selected font size. To change to another size, choose one from this menu."
// Sound menu help
#define HELPSOUNDMENU "Sound menu\n\nUse this menu to record, play, or erase a voice annotation, or to hear the current text spoken."
#define HELPRECORDE "Allows you to record a new voice annotation."
#define HELPRECORDD "Allows you to record a new voice annotation. Not available because there is already a recorded voice annotation, your Macintosh does not support sound input, or this is a read–only document."
#define HELPPLAYE "Plays the voice annotation."
#define HELPPLAYD "Plays the voice annotation. Not available because there is no recorded voice annotation."
#define HELPERASEE "Erases the current voice annotation so that you can record a new one."
#define HELPERASED "Erases the current voice annotation so that you can record a new one. Not available because there is no recorded voice annotation, or this is a read–only document."
#define HELPSPEAKALLE "Speaks all the text in the document aloud, using the currently checked voice from the Voices submenu, below."
#define HELPSPEAKALLD "Speaks text aloud. Not available, because Text-To-Speech is not installed, this document cannot be spoken, or there is not enough memory to speak (increase " APPNAME "'s partition size)."
#define HELPSPEAKSELECTIONE "Speaks the currently selected text aloud, using the currently checked voice from the Voices submenu, below."
#define HELPSPEAKSELECTIOND "Speaks the currently selected text aloud. Not available, because Text-To-Speech is not installed, this document cannot be spoken, or there is not enough memory to speak (increase " APPNAME "'s partition size)."
#define HELPSTOPSPEAKINGE "Quiets the speaking you hear right now."
#define HELPSTOPSPEAKINGD "Quiets any speaking you started. Not available because there is nothing being spoken at the moment."
#define HELPVOICESE "Has submenu which allows you to pick the voice used when speaking."
#define HELPVOICESD "Has submenu which allows you to pick which voice to speak with. Not available now because speech is not available. Either speech is not installed or there is not enough memory to speak (increase " APPNAME "'s partition size)."
#define HELPVOICESELE "One of the possible voices to speak with. Choose this item to speak with this voice next time you speak."
#define HELPVOICESELC "The voice that will be used to speak with next time you speak."
// Window content help
#define HELPACTIVESCROLL "Scroll bar\n\nTo scroll a little at a time, press one of the scroll arrows. To scroll by the windowful, click in the gray bar. To scroll to another part of the window, drag the scroll box."
#define HELPDIMHORIZSCROLL "Scroll bar\n\nUse the scroll bar to see items that are out of view. This scroll bar is not available because this window contains no items to the left or right of the window’s borders."
#define HELPDIMVERTSCROLL "Scroll bar\n\nUse the scroll bar to see items that are out of view. This scroll bar is not available because this window contains no items above or below the window’s borders."
#define HELPGROWBOX "Size box\n\nTo change the height and width of the window, drag the size box."
#define HELPGENERICCONTENT "This is a document window. You cannot type or edit material in this document because the document is a read-only document."
#define HELPPICTCONT "This is a document window — the area in which you can view and copy images from the "APPNAME" picture document."
#define HELPPICTSEL "The area inside this rectangle is selected. You can copy the selected area to the Clipboard by choosing Copy from the Edit menu."
#define HELPTEXTCONT "This is a document window — the area in which you can type and edit text."
|
97edf3079ddd2aa381ac6b68cce73082e980862b | 7ccf42893e1c4e2ac20d7e06e018e593ef77d42e | /man/bkfilter.Rd | 9d95ef4b4f529cf71d5f6d71bd1aefec7b87a0ea | [] | no_license | cran/mFilter | 56e1081ccfb23b96aae6814acdcadf1935fb6457 | 6fb48d9017a835f5dd719c991867f1c521b43d53 | refs/heads/master | 2021-05-15T02:01:21.277659 | 2019-06-04T21:20:12 | 2019-06-04T21:20:12 | 17,671,609 | 2 | 4 | null | null | null | null | UTF-8 | R | false | false | 4,846 | rd | bkfilter.Rd | \name{bkfilter}
\alias{bkfilter}
\title{
Baxter-King filter of a time series
}
\description{
This function implements the Baxter-King approximation to
the band pass filter for a time series. The function computes cyclical
and trend components of the time series using band-pass
approximation for fixed and variable length filters.
}
\usage{
bkfilter(x,pl=NULL,pu=NULL,nfix=NULL,type=c("fixed","variable"),drift=FALSE)
}
\arguments{
\item{x}{a regular time series}
\item{type}{character, indicating the filter type,
\code{"fixed"}, for the fixed length Baxter-King filter
(default),
\code{"variable"}, for the variable length Baxter-King filter.}
\item{pl}{integer. minimum period of oscillation of desired component (2<=pl).}
\item{pu}{integer. maximum period of oscillation of desired component (2<=pl<pu<infinity).}
\item{drift}{logical, \code{FALSE} if no drift in time series
(default), \code{TRUE} if drift in time series.}
\item{nfix}{sets fixed lead/lag length or order of the filter. The
\code{nfix} option sets the order of the filter by 2*nfix+1. The
default is \code{frequency(x)*3}.}
}
\details{
Almost all filters in this package can be put into the
following framework. Given a time series \eqn{\{x_t\}^T_{t=1}} we are
interested in isolating component of \eqn{x_t}, denoted \eqn{y_t} with
period of oscillations between \eqn{p_l} and \eqn{p_u}, where \eqn{2
\le p_l < p_u < \infty}.
Consider the following decomposition of the time series
\deqn{x_t = y_t + \bar{x}_t}
The component \eqn{y_t} is assumed to have power only in the frequencies
in the interval \eqn{\{(a,b) \cup (-a,-b)\} \in (-\pi, \pi)}. \eqn{a}
and \eqn{b} are related to \eqn{p_l} and \eqn{p_u} by
\deqn{a=\frac{2 \pi}{p_u}\ \ \ \ \ {b=\frac{2 \pi}{p_l}}}
If infinite amount of data is available, then we can use the ideal
bandpass filter
\deqn{y_t = B(L)x_t}
where the filter, \eqn{B(L)}, is given in terms of the lag operator
\eqn{L} and defined as
\deqn{B(L) = \sum^\infty_{j=-\infty} B_j L^j, \ \ \ L^k x_t = x_{t-k}}
The ideal bandpass filter weights are given by
\deqn{B_j = \frac{\sin(jb)-\sin(ja)}{\pi j}}
\deqn{B_0=\frac{b-a}{\pi}}
The Baxter-King filter is a finite data approximation to the
ideal bandpass filter with following moving average weights
\deqn{y_t = \hat{B}(L)x_t=\sum^{n}_{j=-n}\hat{B}_{j} x_{t+j}=\hat{B}_0
x_t + \sum^{n}_{j=1} \hat{B}_j (x_{t-j}+x_{t+j})}
where
\deqn{\hat{B}_j=B_j-\frac{1}{2n+1}\sum^{n}_{j=-n}B_{j}}
If \code{drift=TRUE} the drift adjusted series is obtained
\deqn{\tilde{x}_{t}=x_t-t\left(\frac{x_{T}-x_{1}}{T-1}\right), \ \ t=0,1,\dots,T-1}
where \eqn{\tilde{x}_{t}} is the undrifted series.
}
\value{
A "\code{mFilter}" object (see \code{\link{mFilter}}).
}
\references{
M. Baxter and R.G. King. Measuring business cycles: Approximate bandpass
filters. The Review of Economics and Statistics, 81(4):575-93, 1999.
L. Christiano and T.J. Fitzgerald. The bandpass filter. International Economic
Review, 44(2):435-65, 2003.
J. D. Hamilton. \emph{Time series analysis.} Princeton, 1994.
R.J. Hodrick and E.C. Prescott. Postwar US business cycles: an empirical
investigation. Journal of Money, Credit, and Banking, 29(1):1-16, 1997.
R.G. King and S.T. Rebelo. Low frequency filtering and real business cycles.
Journal of Economic Dynamics and Control, 17(1-2):207-31, 1993.
D.S.G. Pollock. Trend estimation and de-trending via rational square-wave
filters. Journal of Econometrics, 99:317-334, 2000.
}
\author{
Mehmet Balcilar, \email{mehmet@mbalcilar.net}
}
\seealso{
\code{\link{mFilter}}, \code{\link{bwfilter}}, \code{\link{cffilter}},
\code{\link{hpfilter}}, \code{\link{trfilter}}
}
\examples{
## library(mFilter)
data(unemp)
opar <- par(no.readonly=TRUE)
unemp.bk <- bkfilter(unemp)
plot(unemp.bk)
unemp.bk1 <- bkfilter(unemp, drift=TRUE)
unemp.bk2 <- bkfilter(unemp, pl=8,pu=40,drift=TRUE)
unemp.bk3 <- bkfilter(unemp, pl=2,pu=60,drift=TRUE)
unemp.bk4 <- bkfilter(unemp, pl=2,pu=40,drift=TRUE)
par(mfrow=c(2,1),mar=c(3,3,2,1),cex=.8)
plot(unemp.bk1$x,
main="Baxter-King filter of unemployment: Trend, drift=TRUE",
col=1, ylab="")
lines(unemp.bk1$trend,col=2)
lines(unemp.bk2$trend,col=3)
lines(unemp.bk3$trend,col=4)
lines(unemp.bk4$trend,col=5)
legend("topleft",legend=c("series", "pl=2, pu=32", "pl=8, pu=40",
"pl=2, pu=60", "pl=2, pu=40"), col=1:5, lty=rep(1,5), ncol=1)
plot(unemp.bk1$cycle,
main="Baxter-King filter of unemployment: Cycle,drift=TRUE",
col=2, ylab="", ylim=range(unemp.bk3$cycle,na.rm=TRUE))
lines(unemp.bk2$cycle,col=3)
lines(unemp.bk3$cycle,col=4)
lines(unemp.bk4$cycle,col=5)
## legend("topleft",legend=c("pl=2, pu=32", "pl=8, pu=40", "pl=2, pu=60",
## "pl=2, pu=40"), col=1:5, lty=rep(1,5), ncol=1)
par(opar)
}
\keyword{ts}
\keyword{smooth}
\keyword{loess}
\keyword{nonparametric}
|
122a90e2af46181180e8a1c376bb429d2577a541 | d6b2a61d012c4ea25414604140ba1806afca12c9 | /server.R | ed0f88831861ec47ada6c9091f5605cf462f75e9 | [] | no_license | hsiujho/test1 | 5d9d22e44f6ec13495f84e9046286b32464bf19e | 001a452c1e5f50b4b5e848856a2f62e640d1e576 | refs/heads/master | 2021-01-01T15:55:14.112578 | 2016-12-12T00:44:18 | 2016-12-12T00:44:18 | 31,342,758 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,825 | r | server.R |
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
shinyServer(function(input, output) {

  # Summarise read counts for one sample at one taxonomic level, keeping the
  # `top` most abundant named taxa and collapsing the rest (including
  # unassigned "NA" taxa) into a single "Other" row.
  #
  # Relies on objects defined outside this file (presumably in global.R --
  # TODO confirm): `ya`, a list of data.tables (one per taxonomic level, each
  # with a "shortname" column plus one count column per sample), `lvn`, the
  # vector of level names, and the data.table/ggplot2/googleVis packages
  # being attached.
  #
  # sampleid: name of the sample column to summarise
  # lv:       integer index into `ya` selecting the taxonomic level
  # top:      maximum number of named slices to keep
  #
  # Returns a data.table with columns Taxonomy and Reads (plus a helper
  # column `top` when collapsing occurred), sorted by decreasing Reads.
  pie_fn <- function(sampleid, lv, top) {
    counts <- ya[[lv]][, c("shortname", sampleid), with = FALSE]
    setnames(counts, c("shortname", sampleid), c("Taxonomy", "Reads"))
    counts <- counts[, .(Reads = sum(Reads)), by = Taxonomy]
    counts <- counts[Reads != 0]

    if (nrow(counts[Taxonomy == "NA"]) == 0 && nrow(counts) <= top) {
      # Few enough taxa and nothing unassigned: no collapsing needed.
      result <- counts[order(Reads, decreasing = TRUE)]
    } else {
      named <- counts[Taxonomy != "NA"]
      # seq_len() (rather than 1:min(...)) stays correct when `named` is empty.
      keep <- named[order(Reads, decreasing = TRUE)[seq_len(min(top, nrow(named)))],
                    Taxonomy]
      counts[, top := ifelse(Taxonomy %chin% keep, 1, 0)]
      groups <- split(counts, counts[, top])
      # The first group (top == 0) always exists here; replace it with a
      # single aggregated "Other" slice.
      groups[[1]] <- data.table(Taxonomy = "Other",
                                Reads = groups[[1]][, sum(Reads)],
                                top = 0)
      result <- rbindlist(groups)[order(Reads, decreasing = TRUE)]
    }
    result
  }

  # Base-graphics pie chart of the top 10 taxa for the selected sample/level.
  output$distPlot1 <- renderPlot({
    tab <- pie_fn(sampleid = input$SampleID, lv = which(lvn == input$TaxoLv), top = 10)
    reads <- tab[, Reads]
    names(reads) <- tab[, Taxonomy]
    pie(reads)
  })

  # ggplot2 pie chart (stacked bar in polar coordinates) of the same summary,
  # with slices sized by percentage of total reads.
  output$distPlot2 <- renderPlot({
    tab <- pie_fn(sampleid = input$SampleID, lv = which(lvn == input$TaxoLv), top = 10)
    tab$pct <- tab$Reads / sum(tab$Reads) * 100
    p <- ggplot(tab, aes(x = 1, y = pct, fill = Taxonomy)) +
      geom_bar(width = 2, stat = "identity") +
      coord_polar(theta = "y") +
      scale_x_discrete("")
    print(p)
  })

  # googleVis interactive pie chart of the same top-10 summary.
  output$gvis <- renderGvis({
    tab <- pie_fn(sampleid = input$SampleID, lv = which(lvn == input$TaxoLv), top = 10)
    gvisPieChart(tab, labelvar = "Taxonomy", numvar = "Reads",
                 options = list(width = "800px", height = "400px"))
  })
})
|
b1890bd752e06e64701d14c82cfc0a0f0aff8f50 | d3c8517180f3a2691c45b6d63781af9fbcc17ffc | /base.R | 2d645bbc07c6325d4ac4aafb9cadfd9086f9751f | [] | no_license | riglerandras/DevelopingDataProducts | 652e71f9d59d4752d89f83b92e165747c6e4b459 | daef82f0366ddf057ac0c27e6a1e45a240c464e3 | refs/heads/master | 2021-01-10T05:10:39.436648 | 2015-11-07T14:45:02 | 2015-11-07T14:45:02 | 45,735,486 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 692 | r | base.R | data(mtcars)
# Demonstration script: predict MPG from car weight with a simple
# linear model (mpg ~ wt on mtcars) and visualise the prediction.

# Inputs (hard-coded here; in the interactive app these come from UI
# controls): the weight at which to predict, and a filter label that is
# only displayed in the plot title.
myValue = 6
filtBy <- "All"
d <- mtcars
# Fit the regression and compute the point prediction at `myValue`.
fit <- lm(mpg~wt, data=d)
predVal <- predict(fit, newdata=data.frame(wt=myValue))
# Scatterplot of the raw data, titled with the filter label.
plot(d$wt, d$mpg, col="steelblue", pch=19, cex=.9,
main = paste0("Prediction of MPG based on car weight\n",
"Filter by transmission: ", filtBy ),
cex.main=.9,
xlim = c(1,6), ylim = c(5,35),
xlab="Car weight", ylab = "MPG", cex.axis=.9, cex.lab=.9)
# Regression line plus crosshair guides meeting at the prediction.
abline(fit, col="darkred")
abline(v=myValue, col="gray")
abline(h=predVal, col="gray")
points(myValue, predVal, pch=20, cex=2, col="darkred")
# Annotate the chosen weight and the rounded predicted MPG.
text(6,30,pos=2,labels=paste("Weight:", myValue), cex=.8)
text(6,26,pos=2,labels=paste("MPG:", round(predVal, 1)), cex=.8)
|
dc333a2b6f1ad34eb57945484af9787c8860a13f | e10912d219907eb91d3e5fb34d6a62966d080d79 | /run_analysis.R | 83d5d8bcf5bde6607d4279af1822b3832fad8142 | [] | no_license | ckwong338/GCDAssignment | fffdfafb9bf3d6a114b0befb1cf45701669b4790 | 5fdd68fbe2c820171fd587c6d654ae716852ab5f | refs/heads/master | 2021-01-01T19:20:48.708810 | 2014-09-20T15:55:11 | 2014-09-20T15:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,419 | r | run_analysis.R | library(reshape2)
## run_analysis.R
## Builds a tidy summary of the UCI HAR (Samsung accelerometer) data:
##  1. merge the train and test sets,
##  2. keep only mean()/std() measurements,
##  3. label activities and clean variable names,
##  4. average each variable per subject and activity,
##  5. write the result to tidydata.txt.
## Assumes the "UCI HAR Dataset" folder sits in the working directory.
## read activity labels
activity_labels <-read.table("UCI HAR Dataset\\activity_labels.txt")
## read train data set
## (read 100 rows first to detect column classes, then pass colClasses
## to the full read - this speeds up read.table considerably)
initial <- read.table("UCI HAR Dataset\\train\\X_train.txt", nrows=100)
classes <- sapply(initial, class)
X_train <- read.table("UCI HAR Dataset\\train\\X_train.txt", colClasses = classes)
y_train <- read.table("UCI HAR Dataset\\train\\y_train.txt")
subject_train <- read.table("UCI HAR Dataset\\train\\subject_train.txt")
## add in column for activity labels and subject
X_train$activity <- factor(y_train$V1, labels = activity_labels$V2)
X_train$subject <- subject_train$V1
## read test data set (same colClasses trick as above)
initial <- read.table("UCI HAR Dataset\\test\\X_test.txt", nrows=100)
classes <- sapply(initial, class)
X_test <- read.table("UCI HAR Dataset\\test\\X_test.txt", colClasses = classes)
y_test <- read.table("UCI HAR Dataset\\test\\y_test.txt")
subject_test <- read.table("UCI HAR Dataset\\test\\subject_test.txt")
## add in column for activity labels and subject
X_test$activity <- factor(y_test$V1, labels = activity_labels$V2)
X_test$subject <- subject_test$V1
## merge data sets (stack train on top of test)
X_merged <- rbind(X_train, X_test)
## read in column names
features <- read.table("UCI HAR Dataset\\features.txt")
features$V2 <- as.character(features$V2)
## add in column name for activity label
## (append entries so features$V2 lines up with the two extra columns)
features[length(features$V2)+1,] <- c(length(features$V2)+1, "activity")
features[length(features$V2)+1,] <- c(length(features$V2)+1, "subject")
## set column names
names(X_merged) <- features$V2
## drop non mean() and std() columns
## (keeping the activity and subject identifier columns)
grepl_results <- grepl("mean\\(\\)|std\\(\\)|activity|subject", features$V2)
X_merged <- subset(X_merged, select = grepl_results)
## rename variable names with descriptive variable names
names(X_merged) <- sub("mean\\(\\)", "Mean", names(X_merged))
names(X_merged) <- sub("std\\(\\)", "StdDev", names(X_merged))
names(X_merged) <- sub("BodyBody", "Body", names(X_merged))
## melt X_merged to X_melt, specifying the 66 columns as variable
the_id <- c("subject", "activity")
X_melt <- melt(X_merged, id=the_id, measure.vars = names(X_merged)[c(1:66)])
## cast X_melt, subject, activity by variable, and aggregate using mean
X_dcast <- dcast(X_melt, subject + activity ~ variable, mean)
## save tidy data to text file
write.table(X_dcast, file="tidydata.txt",row.names=FALSE)
## to read tidy data text file
## X_dcast_read <- read.table("tidydata.txt",header=TRUE)
|
b32f309df91394cc6fff07cf2bd8c487d3867be6 | 139e93dc5ad1f30938195671caf4aefce99f188d | /R/map-raw.R | 3568b435f653b0bfc1577bf1d3d805bc51123ef1 | [
"MIT"
] | permissive | tidyverse/purrr | 7b94592b1eb6f4e6db8d83fc307465ce7b65b520 | ac4f5a9b9ff2b5b36770c4c5e064547264544fd2 | refs/heads/main | 2023-08-28T01:39:40.614443 | 2023-08-10T14:13:52 | 2023-08-10T14:13:52 | 27,309,729 | 901 | 265 | NOASSERTION | 2023-09-03T11:49:30 | 2014-11-29T17:33:40 | R | UTF-8 | R | false | false | 1,150 | r | map-raw.R | #' Functions that return raw vectors
#'
#' @description
#' `r lifecycle::badge("deprecated")`
#'
#' These functions were deprecated in purrr 1.0.0 because they are of limited
#' use and you can now use `map_vec()` instead. They are variants of [map()],
#' [map2()], [imap()], [pmap()], and [flatten()] that return raw vectors.
#'
#' @keywords internal
#' @export
map_raw <- function(.x, .f, ...) {
  # Soft deprecation warning, then delegate to the internal typed-map
  # implementation with "raw" as the output type.
  lifecycle::deprecate_soft("1.0.0", "map_raw()", "map_vec()")
  map_("raw", .x, .f, ...)
}
#' @export
#' @rdname map_raw
map2_raw <- function(.x, .y, .f, ...) {
  # Deprecated: pairwise map over .x and .y, coercing results to raw.
  lifecycle::deprecate_soft("1.0.0", "map2_raw()", "map2_vec()")
  map2_("raw", .x, .y, .f, ...)
}
#' @rdname map_raw
#' @export
imap_raw <- function(.x, .f, ...) {
  # Deprecated: indexed map - pairs each element with its name (or
  # position, via vec_index()) and coerces results to raw.
  lifecycle::deprecate_soft("1.0.0", "imap_raw()", "imap_vec()")
  map2_("raw", .x, vec_index(.x), .f, ...)
}
#' @export
#' @rdname map_raw
pmap_raw <- function(.l, .f, ...) {
  # Deprecated: parallel map over the lists in .l, coercing results to raw.
  lifecycle::deprecate_soft("1.0.0", "pmap_raw()", "pmap_vec()")
  pmap_("raw", .l, .f, ...)
}
#' @export
#' @rdname map_raw
flatten_raw <- function(.x) {
  # Deprecated: flatten a list of raw vectors into a single raw vector
  # via the C implementation.
  lifecycle::deprecate_soft("1.0.0", "flatten_raw()")
  .Call(vflatten_impl, .x, "raw")
}
|
e259f329dda313445e2a09bb186f059292e53d7d | 678c7a152cc00df4d1c38fae6c59511b417aef54 | /tests/testthat/test-resDS.R | 73c4bbf454f60475556be6a382567acb14967974 | [] | no_license | GabrielHoffman/muscat | 368c21e3caae95abe29d6b2be79b174ed4ef79c0 | 93a3d88fd4a6bacb92811f12af10b5895b912bd7 | refs/heads/master | 2023-08-24T10:27:27.795931 | 2021-10-14T17:04:31 | 2021-10-14T17:04:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,214 | r | test-resDS.R | # load packages
suppressMessages({
  library(dplyr)
  library(purrr)
  library(SingleCellExperiment)
})
# generate toy dataset
# The seed is taken from the wall clock so every run exercises a fresh
# random draw.
# NOTE(review): this makes failures non-reproducible unless the seed
# value is reported on failure - confirm this is intentional.
seed <- as.numeric(format(Sys.time(), "%s"))
set.seed(seed)
x <- .toySCE()
# numbers of clusters / samples / groups in the toy SCE
nk <- length(kids <- levels(x$cluster_id))
ns <- length(sids <- levels(x$sample_id))
ng <- length(gids <- levels(x$group_id))
# sample 'n_de' genes & multiply counts by 10 for 'g2/3'-cells
# (spikes a known differential signal into groups g2 and g3)
g23 <- x$group_id != "g1"
de_gs <- sample(rownames(x), (n_de <- 5))
assay(x[de_gs, g23]) <- assay(x[de_gs, g23]) * 10
# aggregate & run pseudobulk DS analysis
# (these objects are shared by all test_that() blocks below)
nc <- length(cs <- list(2, 3))
pbs <- aggregateData(x, assay = "counts", fun = "sum")
y <- pbDS(pbs, coef = cs, filter = "none", verbose = FALSE)
test_that("resDS()", {
  # Expected dimensions: column-binding yields one row per gene-cluster
  # combination; row-binding additionally stacks the `nc` coefficients.
  v <- list(col = list(nr = nrow(x)*nk, ng = nrow(x), nk = nk))
  v$row <- lapply(v$col, "*", nc)
  v$col$char_cols <- c("gene", "cluster_id")
  v$row$char_cols <- c(v$col$char_cols, "coef")
  for (bind in c("row", "col")) {
    z <- resDS(x, y, bind, frq = FALSE, cpm = FALSE)
    expect_is(z, "data.frame")
    expect_identical(nrow(z), v[[bind]]$nr)
    expect_true(all(table(z$gene) == v[[bind]]$nk))
    expect_true(all(table(z$cluster_id) == v[[bind]]$ng))
    # identifier columns are character, all remaining columns numeric
    is_char <- colnames(z) %in% v[[bind]]$char_cols
    expect_true(all(apply(z[, !is_char], 2, class) == "numeric"))
    expect_true(all(apply(z[, is_char], 2, class) == "character"))
  }
})
test_that("resDS() - 'frq = TRUE'", {
  z <- resDS(x, y, frq = TRUE)
  # one expression-frequency column per sample plus one per group,
  # all values being proportions in [0, 1] (or NA)
  u <- z[, grep("frq", colnames(z))]
  expect_true(ncol(u) == ns + ng)
  expect_true(all(u <= 1 & u >= 0 | is.na(u)))
  # remove single cluster-sample instance
  # (frequencies should still be computed, with 0 for the removed pair)
  s <- sample(sids, 1); k <- sample(kids, 1)
  x_ <- x[, !(x$sample_id == s & x$cluster_id == k)]
  y_ <- aggregateData(x_, assay = "counts", fun = "sum")
  y_ <- pbDS(y_, coef = cs, verbose = FALSE)
  z <- resDS(x_, y_, frq = TRUE)
  u <- z[, grep("frq", colnames(z))]
  expect_true(ncol(u) == ns + ng)
  expect_true(all(u <= 1 & u >= 0 | is.na(u)))
  # the removed cluster-sample pair has zero detection frequency
  expect_true(all(z[z$cluster_id == k, paste0(s, ".frq")] == 0))
})
test_that("resDS() - 'cpm = TRUE'", {
  z <- resDS(x, y, cpm = TRUE)
  # one CPM column per sample
  u <- z[, grep("cpm", colnames(z))]
  expect_true(ncol(u) == ns)
  # NOTE(review): evenness presumably follows from the toy counts being
  # multiples of 2 - confirm against .toySCE()
  expect_true(all(u %% 2 == 0 | is.na(u)))
})
test_that("missing cluster is handled", {
  # drop an entire cluster; it must be absent from the results
  k <- sample(kids, 1)
  i <- !(x$cluster_id == k)
  pbs <- aggregateData(x[, i], verbose = FALSE)
  res <- pbDS(pbs, verbose = FALSE)
  tbl <- resDS(x, res, cpm = TRUE)
  expect_true(!k %in% unique(tbl$cluster_id))
})
test_that("missing sample is handled", {
  # drop an entire sample; no column for it should be produced
  s <- sample(sids, 1)
  i <- !(x$sample_id == s)
  pbs <- aggregateData(x[, i], verbose = FALSE)
  res <- pbDS(pbs, verbose = FALSE)
  tbl <- resDS(x, res, cpm = TRUE)
  expect_true(sum(grepl(s, names(tbl))) == 0)
})
test_that("missing cluster-sample is handled", {
  # drop a single cluster-sample pair; its CPM entries should be NA
  k <- sample(kids, 1)
  s <- sample(sids, 1)
  i <- !(x$cluster_id == k & x$sample_id == s)
  pbs <- aggregateData(x[, i], verbose = FALSE)
  res <- pbDS(pbs, verbose = FALSE)
  tbl <- resDS(x, res, cpm = TRUE)
  sub <- tbl[tbl$cluster_id == k, ]
  expect_true(all(is.na(sub[, grep(s, names(sub))])))
})
|
b7b15dc9a7dcc8487d7d25433f353f5e11d1e57b | fb0fd8d83376bf697ff6d5e582774745681dd5f4 | /tests/testthat/test-square.R | 89bc2e8b1b6ed63c05582c1d4578a19ae926b7bb | [
"CC0-1.0",
"MIT"
] | permissive | goldingn/greta.funprior | 616c9b7d7f1d94c61b837c57ee225be05feff8be | b5eba6fc33694eec12381ee8b95c68c74b5ae811 | refs/heads/master | 2020-06-14T22:10:58.735993 | 2019-07-04T02:14:00 | 2019-07-04T02:14:00 | 195,141,009 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 431 | r | test-square.R | test_that("square gives the correct answer", {
  # skip unless a compatible TensorFlow backend is available
  check_tf_version <- greta::.internals$utils$misc$check_tf_version
  skip_if_not(check_tf_version())
  # greta array of unknowns and its elementwise square
  x <- variable(dim = c(2, 3))
  y <- square(x)
  # compute the square in greta/tensorflow
  x_value <- matrix(rnorm(6), 2, 3)
  y_value <- calculate(y, list(x = x_value))
  # compute in R
  y_expected <- x_value ^ 2
  # compare
  expect_equal(y_value, y_expected)
})
|
132210448b135e6f1ba9403d00cf246b95e8d3f5 | c91568c244942325a47e126fbb8def6d15c79218 | /QR/modules/LoginModule/LoginModule.UI.R | 30525f907283421120f448b90aaedb937fa42704 | [] | no_license | CrisBMoya/DisenoProyect | 5c0a2eb25a78beeb45e3997266c4fd797814b700 | 6334265577fdc76cdfee3cbe51143fe842a0799d | refs/heads/master | 2020-04-30T11:27:31.460498 | 2019-05-11T00:39:19 | 2019-05-11T00:39:19 | 176,801,705 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,111 | r | LoginModule.UI.R | LoginModule.UI=function(id, label="LoginModuleUI"){
ns=NS(id)
tags$div(id="LoginUI",{
fluidPage(
tags$hr(),
column(3,
textInput(inputId="Usuario", label="Usuario", value=""),
passwordInput(inputId="Clave", label="Clave", value=""),
splitLayout(cellWidths=c("50%", "50%"),
actionButton(inputId="LoginBtn", label="Ingresa", icon=icon("sign-in-alt"),
style="color: #fff;
background-color: #47a447;
border-color: #398439;
display: inline-block;"),
actionButton(inputId="RegBtn", label="Regístrate", icon=icon("file-signature"),
style="color: #fff;
background-color: #428bca;
border-color: #357ebd;
display: inline-block;")
),
textOutput(outputId="Msg")
),
column(3),
column(3)
)
})
}
|
cb0c2abc0efad8c376bb47e6ed060376510f10cd | 23c3a18aca004781e0376a86d7f55df278d598fa | /_targets.R | d07b9abcae8d757e2eab35fa58407edab18f129c | [
"MIT",
"CC-BY-4.0"
] | permissive | jixing475/canary-ngos | 677d66f512aa0237f87927bef75017ada887e3df | a155d85f76d1681093693aced0cbcb5b6eca734a | refs/heads/master | 2023-03-31T00:21:08.077536 | 2021-04-08T12:55:41 | 2021-04-08T12:55:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,386 | r | _targets.R | library(targets)
library(tarchetypes)
library(tibble)
# General variables
# Pandoc assets used when rendering the manuscript targets below.
csl <- "pandoc/csl/chicago-author-date.csl"
bibstyle <- "bibstyle-chicago-authordate"
suppressPackageStartupMessages(library(brms))
# By default, R uses polynomial contrasts for ordered factors in linear models
# options("contrasts")
# So make ordered factors use treatment contrasts instead
options(contrasts = rep("contr.treatment", 2))
# Or do it on a single variable:
# contrasts(df$x) <- "contr.treatment"
set.seed(9936) # From random.org
# Bayes-specific stuff
# Use all cores for MCMC chains and compile Stan models via cmdstanr.
options(mc.cores = parallel::detectCores(),
        brms.backend = "cmdstanr")
options(tidyverse.quiet = TRUE,
        dplyr.summarise.inform = FALSE)
# Parallel workers for targets that support futures.
future::plan(future::multisession)
# Packages loaded for every target in the pipeline.
tar_option_set(packages = c("tidyverse", "countrycode", "states", "WDI", "here", "fs",
                            "readxl", "haven", "sf", "lubridate", "scales", "naniar",
                            "janitor", "kableExtra", "huxtable", "modelsummary",
                            "knitr", "withr", "flextable", "testthat", "DT",
                            "brms", "tidybayes", "broom", "broom.mixed", "scico",
                            "ggtext", "colorspace", "lme4", "cmdstanr", "jsonlite"))
# Function definitions used by the targets below.
source("R/funs_data-cleaning.R")
source("R/funs_knitting.R")
source("R/funs_notebook.R")
source("R/models_details.R")
source("R/models_analysis.R")
source("R/models_pts.R")
source("R/models_clphy.R")
source("R/models_clpriv.R")
# here::here() returns an absolute path, which then gets stored in tar_meta and
# becomes computer-specific (i.e. /Users/andrew/Research/blah/thing.Rmd).
# There's no way to get a relative path directly out of here::here(), but
# fs::path_rel() works fine with it (see
# https://github.com/r-lib/here/issues/36#issuecomment-530894167)
here_rel <- function(...) {fs::path_rel(here::here(...))}
list(
# Define raw data files
tar_target(chaudhry_raw_file,
here_rel("data", "raw_data", "Chaudhry restrictions", "SC_Expanded.dta"),
format = "file"),
tar_target(pts_raw_file,
here_rel("data", "raw_data", "Political Terror Scale", "PTS-2019.RData"),
format = "file"),
tar_target(journalists_raw_file,
here_rel("data", "raw_data", "Gohdes Carey journalists",
"journalist-data-incl-pts.RData"),
format = "file"),
tar_target(ucdp_raw_file,
here_rel("data", "raw_data", "UCDP PRIO", "ucdp-prio-acd-191.csv"),
format = "file"),
tar_target(vdem_raw_file,
here_rel("data", "raw_data", "Country_Year_V-Dem_Full+others_R_v10",
"V-Dem-CY-Full+Others-v10.rds"),
format = "file"),
tar_target(un_pop_raw_file,
here_rel("data", "raw_data", "UN data",
"WPP2019_POP_F01_1_TOTAL_POPULATION_BOTH_SEXES.xlsx"),
format = "file"),
tar_target(un_gdp_constant_raw_file,
here_rel("data", "raw_data", "UN data",
"UNdata_Export_20210118_034054729.csv"),
format = "file"),
tar_target(un_gdp_current_raw_file,
here_rel("data", "raw_data", "UN data",
"UNdata_Export_20210118_034311252.csv"),
format = "file"),
tar_target(naturalearth_raw_file,
here_rel("data", "raw_data", "ne_110m_admin_0_countries",
"ne_110m_admin_0_countries.shp"),
format = "file"),
tar_target(civicus_raw_file,
here_rel("data", "raw_data", "Civicus", "civicus_2021-03-19.json"),
format = "file"),
# Define helper functions
tar_target(plot_funs, here_rel("lib", "graphics.R"), format = "file"),
# Load and clean data
tar_target(world_map, load_world_map(naturalearth_raw_file)),
tar_target(civicus_clean, load_clean_civicus(civicus_raw_file)),
tar_target(civicus_map_data, create_civicus_map_data(civicus_clean, world_map)),
tar_target(skeleton, create_panel_skeleton()),
tar_target(wdi_clean, load_clean_wdi(skeleton)),
tar_target(chaudhry_clean, load_clean_chaudhry(chaudhry_raw_file)),
tar_target(pts_clean, load_clean_pts(pts_raw_file, skeleton)),
tar_target(killings_all, load_clean_journalists(journalists_raw_file)),
tar_target(ucdp_prio_clean, load_clean_ucdp(ucdp_raw_file)),
tar_target(vdem_clean, load_clean_vdem(vdem_raw_file)),
tar_target(un_pop, load_clean_un_pop(un_pop_raw_file, skeleton, wdi_clean)),
tar_target(un_gdp, load_clean_un_gdp(un_gdp_constant_raw_file,
un_gdp_current_raw_file, skeleton)),
# Combine data
# This includes 2014 for lagging/leading
tar_target(panel_with_2014, combine_data(skeleton, chaudhry_clean, pts_clean,
killings_all, ucdp_prio_clean, vdem_clean,
un_pop, un_gdp)),
# THIS is the final 2014-less data
tar_target(panel, trim_data(panel_with_2014)),
# Lagging/leading
tar_target(panel_lagged, trim_data(lag_data(panel_with_2014))),
# Training/testing
tar_target(panel_training, trim_data(create_training(panel_with_2014))), # Remove 2014
tar_target(panel_training_lagged, create_training(panel_lagged)),
tar_target(panel_testing, trim_data(create_testing(panel_with_2014))), # Remove 2014
tar_target(panel_testing_lagged, create_testing(panel_lagged)),
# Models for the political terror score (PTS_factor)
## Models using full data
tar_target(m_pts_baseline, f_pts_baseline(panel_lagged)),
tar_target(m_pts_total, f_pts_total(panel_lagged)),
tar_target(m_pts_advocacy, f_pts_advocacy(panel_lagged)),
tar_target(m_pts_entry, f_pts_entry(panel_lagged)),
tar_target(m_pts_funding, f_pts_funding(panel_lagged)),
tar_target(m_pts_v2csreprss, f_pts_v2csreprss(panel_lagged)),
tar_target(m_pts_baseline_rewb, f_pts_baseline_rewb(panel_lagged)),
tar_target(m_pts_total_rewb, f_pts_total_rewb(panel_lagged)),
tar_target(m_pts_advocacy_rewb, f_pts_advocacy_rewb(panel_lagged)),
tar_target(m_pts_entry_rewb, f_pts_entry_rewb(panel_lagged)),
tar_target(m_pts_funding_rewb, f_pts_funding_rewb(panel_lagged)),
tar_target(m_pts_v2csreprss_rewb, f_pts_v2csreprss_rewb(panel_lagged)),
## Models using training data
tar_target(m_pts_baseline_train, f_pts_baseline(panel_training_lagged)),
tar_target(m_pts_total_train, f_pts_total(panel_training_lagged)),
tar_target(m_pts_advocacy_train, f_pts_advocacy(panel_training_lagged)),
tar_target(m_pts_entry_train, f_pts_entry(panel_training_lagged)),
tar_target(m_pts_funding_train, f_pts_funding(panel_training_lagged)),
tar_target(m_pts_v2csreprss_train, f_pts_v2csreprss(panel_training_lagged)),
tar_target(m_pts_baseline_rewb_train, f_pts_baseline_rewb(panel_training_lagged)),
tar_target(m_pts_total_rewb_train, f_pts_total_rewb(panel_training_lagged)),
tar_target(m_pts_advocacy_rewb_train, f_pts_advocacy_rewb(panel_training_lagged)),
tar_target(m_pts_entry_rewb_train, f_pts_entry_rewb(panel_training_lagged)),
tar_target(m_pts_funding_rewb_train, f_pts_funding_rewb(panel_training_lagged)),
tar_target(m_pts_v2csreprss_rewb_train, f_pts_v2csreprss_rewb(panel_training_lagged)),
# Models for physical violence (v2x_clphy)
## Models using full data
tar_target(m_clphy_baseline, f_clphy_baseline(panel_lagged)),
tar_target(m_clphy_total, f_clphy_total(panel_lagged)),
tar_target(m_clphy_advocacy, f_clphy_advocacy(panel_lagged)),
tar_target(m_clphy_entry, f_clphy_entry(panel_lagged)),
tar_target(m_clphy_funding, f_clphy_funding(panel_lagged)),
tar_target(m_clphy_v2csreprss, f_clphy_v2csreprss(panel_lagged)),
tar_target(m_clphy_baseline_rewb, f_clphy_baseline_rewb(panel_lagged)),
tar_target(m_clphy_total_rewb, f_clphy_total_rewb(panel_lagged)),
tar_target(m_clphy_advocacy_rewb, f_clphy_advocacy_rewb(panel_lagged)),
tar_target(m_clphy_entry_rewb, f_clphy_entry_rewb(panel_lagged)),
tar_target(m_clphy_funding_rewb, f_clphy_funding_rewb(panel_lagged)),
tar_target(m_clphy_v2csreprss_rewb, f_clphy_v2csreprss_rewb(panel_lagged)),
## Models using training data
tar_target(m_clphy_baseline_train, f_clphy_baseline(panel_training_lagged)),
tar_target(m_clphy_total_train, f_clphy_total(panel_training_lagged)),
tar_target(m_clphy_advocacy_train, f_clphy_advocacy(panel_training_lagged)),
tar_target(m_clphy_entry_train, f_clphy_entry(panel_training_lagged)),
tar_target(m_clphy_funding_train, f_clphy_funding(panel_training_lagged)),
tar_target(m_clphy_v2csreprss_train, f_clphy_v2csreprss(panel_training_lagged)),
tar_target(m_clphy_baseline_rewb_train, f_clphy_baseline_rewb(panel_training_lagged)),
tar_target(m_clphy_total_rewb_train, f_clphy_total_rewb(panel_training_lagged)),
tar_target(m_clphy_advocacy_rewb_train, f_clphy_advocacy_rewb(panel_training_lagged)),
tar_target(m_clphy_entry_rewb_train, f_clphy_entry_rewb(panel_training_lagged)),
tar_target(m_clphy_funding_rewb_train, f_clphy_funding_rewb(panel_training_lagged)),
tar_target(m_clphy_v2csreprss_rewb_train, f_clphy_v2csreprss_rewb(panel_training_lagged)),
# Models for private civil liberties (v2x_clpriv)
## Models using full data
tar_target(m_clpriv_baseline, f_clpriv_baseline(panel_lagged)),
tar_target(m_clpriv_total, f_clpriv_total(panel_lagged)),
tar_target(m_clpriv_advocacy, f_clpriv_advocacy(panel_lagged)),
tar_target(m_clpriv_entry, f_clpriv_entry(panel_lagged)),
tar_target(m_clpriv_funding, f_clpriv_funding(panel_lagged)),
tar_target(m_clpriv_v2csreprss, f_clpriv_v2csreprss(panel_lagged)),
tar_target(m_clpriv_baseline_rewb, f_clpriv_baseline_rewb(panel_lagged)),
tar_target(m_clpriv_total_rewb, f_clpriv_total_rewb(panel_lagged)),
tar_target(m_clpriv_advocacy_rewb, f_clpriv_advocacy_rewb(panel_lagged)),
tar_target(m_clpriv_entry_rewb, f_clpriv_entry_rewb(panel_lagged)),
tar_target(m_clpriv_funding_rewb, f_clpriv_funding_rewb(panel_lagged)),
tar_target(m_clpriv_v2csreprss_rewb, f_clpriv_v2csreprss_rewb(panel_lagged)),
## Models using training data
tar_target(m_clpriv_baseline_train, f_clpriv_baseline(panel_training_lagged)),
tar_target(m_clpriv_total_train, f_clpriv_total(panel_training_lagged)),
tar_target(m_clpriv_advocacy_train, f_clpriv_advocacy(panel_training_lagged)),
tar_target(m_clpriv_entry_train, f_clpriv_entry(panel_training_lagged)),
tar_target(m_clpriv_funding_train, f_clpriv_funding(panel_training_lagged)),
tar_target(m_clpriv_v2csreprss_train, f_clpriv_v2csreprss(panel_training_lagged)),
tar_target(m_clpriv_baseline_rewb_train, f_clpriv_baseline_rewb(panel_training_lagged)),
tar_target(m_clpriv_total_rewb_train, f_clpriv_total_rewb(panel_training_lagged)),
tar_target(m_clpriv_advocacy_rewb_train, f_clpriv_advocacy_rewb(panel_training_lagged)),
tar_target(m_clpriv_entry_rewb_train, f_clpriv_entry_rewb(panel_training_lagged)),
tar_target(m_clpriv_funding_rewb_train, f_clpriv_funding_rewb(panel_training_lagged)),
tar_target(m_clpriv_v2csreprss_rewb_train, f_clpriv_v2csreprss_rewb(panel_training_lagged)),
# Big dataframe of model names for full models
tar_target(model_df, create_model_df()),
# Calculate marginal effects
tar_target(mfx_e1a_pts, generate_mfx(
tibble(model = list(m_pts_total, m_pts_advocacy, m_pts_entry, m_pts_funding),
plot_var = c("barriers_total", "advocacy", "entry", "funding"),
plot_var_nice = c("Total legal barriers", "Barriers to advocacy",
"Barriers to entry", "Barriers to funding")),
is_categorical = TRUE)),
tar_target(mfx_e1b_clphy, generate_mfx(
tibble(model = list(m_clphy_total, m_clphy_advocacy, m_clphy_entry, m_clphy_funding),
plot_var = c("barriers_total", "advocacy", "entry", "funding"),
plot_var_nice = c("Total legal barriers", "Barriers to advocacy",
"Barriers to entry", "Barriers to funding")))),
tar_target(mfx_e1c_clpriv, generate_mfx(
tibble(model = list(m_clpriv_total, m_clpriv_advocacy, m_clpriv_entry, m_clpriv_funding),
plot_var = c("barriers_total", "advocacy", "entry", "funding"),
plot_var_nice = c("Total legal barriers", "Barriers to advocacy",
"Barriers to entry", "Barriers to funding")))),
tar_target(mfx_e2a_pts, generate_mfx(
tibble(model = list(m_pts_v2csreprss),
plot_var = c("v2csreprss"),
plot_var_nice = c("Civil society repression")),
is_categorical = TRUE)),
tar_target(mfx_e2b_clphy, generate_mfx(
tibble(model = list(m_clphy_v2csreprss),
plot_var = c("v2csreprss"),
plot_var_nice = c("Civil society repression")))),
tar_target(mfx_e2c_clpriv, generate_mfx(
tibble(model = list(m_clpriv_v2csreprss),
plot_var = c("v2csreprss"),
plot_var_nice = c("Civil society repression")))),
# Build models here because they take forever
# Note tibble::lst() instead of base::list(); lst() auto-names the elements by
# their object names
tar_target(coef_list, build_coef_list()),
# Expectation 1
tar_target(models_tbl_e1a_re,
build_modelsummary(lst(m_pts_baseline, m_pts_total,
m_pts_advocacy, m_pts_entry,
m_pts_funding))),
tar_target(models_tbl_e1a_rewb,
build_modelsummary(lst(m_pts_baseline_rewb, m_pts_total_rewb,
m_pts_advocacy_rewb, m_pts_entry_rewb,
m_pts_funding_rewb))),
tar_target(models_tbl_e1b_re,
build_modelsummary(lst(m_clphy_baseline, m_clphy_total,
m_clphy_advocacy, m_clphy_entry,
m_clphy_funding))),
tar_target(models_tbl_e1c_re,
build_modelsummary(lst(m_clpriv_baseline, m_clpriv_total,
m_clpriv_advocacy, m_clpriv_entry,
m_clpriv_funding))),
# Expectation 2
tar_target(models_tbl_e2a,
build_modelsummary(lst(m_pts_baseline, m_pts_v2csreprss,
m_pts_baseline_rewb, m_pts_v2csreprss_rewb))),
tar_target(models_tbl_e2b,
build_modelsummary(lst(m_clphy_baseline, m_clphy_v2csreprss,
m_clphy_baseline_rewb, m_clphy_v2csreprss_rewb))),
tar_target(models_tbl_e2c,
build_modelsummary(lst(m_clpriv_baseline, m_clpriv_v2csreprss,
m_clpriv_baseline_rewb, m_clpriv_v2csreprss_rewb))),
# Models for paper
tar_target(models_paper_pts,
build_modelsummary(lst(m_pts_total, m_pts_advocacy, m_pts_entry,
m_pts_funding, m_pts_v2csreprss))),
tar_target(models_paper_clphy,
build_modelsummary(lst(m_clphy_total, m_clphy_advocacy, m_clphy_entry,
m_clphy_funding, m_clphy_v2csreprss))),
tar_target(models_paper_clpriv,
build_modelsummary(lst(m_clpriv_total, m_clpriv_advocacy, m_clpriv_entry,
m_clpriv_funding, m_clpriv_v2csreprss))),
# Render the analysis notebook
tar_notebook_pages(),
# tarchetypes::tar_render() automatically detects target dependencies in Rmd
# files and knits them, but there's no easy way to pass a custom rendering
# script like bookdown::html_document2(), so two things happen here:
# 1. Set a file-based target with tar_target_raw() and use tar_knitr_deps()
# to detect the target dependencies in the Rmd file
# 2. Use a bunch of other file-based targets to actually render the document
# through different custom functions
tar_target(bib_file,
here_rel("manuscript", "bibliography.bib"),
format = "file"),
tar_target_raw("main_manuscript", here_rel("manuscript", "manuscript.Rmd"),
format = "file",
deps = c("bib_file",
tar_knitr_deps(here_rel("manuscript", "manuscript.Rmd")))),
tar_target(html,
render_html(
input = main_manuscript,
output = here_rel("manuscript", "output", "manuscript.html"),
csl = csl,
bib_file,
support_folder = "output/html-support"),
format = "file"),
tar_target(pdf,
render_pdf(
input = main_manuscript,
output = here_rel("manuscript", "output/manuscript.pdf"),
bibstyle = bibstyle,
bib_file),
format = "file"),
tar_target(ms_pdf,
render_pdf_ms(
input = main_manuscript,
output = here_rel("manuscript", "output/manuscript-ms.pdf"),
bibstyle = bibstyle,
bib_file),
format = "file"),
tar_target(docx,
render_docx(
input = main_manuscript,
output = here_rel("manuscript", "output/manuscript.docx"),
csl = csl,
bib_file),
format = "file"),
tar_target(bib,
extract_bib(
input_rmd = main_manuscript,
input_bib = bib_file,
output = here_rel("manuscript", "output", "extracted-citations.bib")),
format = "file")
)
|
92e5e28b80e2c7ef8c1a22070b9a238bfa76fc76 | 7e3f188372012ed9635facb1a2a3b0bab71cef48 | /R/variable_description.r | 3f753a813a098c8485e7974e22801f8cd70a02a8 | [] | no_license | skranz/RTutor | ae637262b72f48646b013b5c6f89bb414c43b04d | f2939b7082cc5639f4695e671d179da0283df89d | refs/heads/master | 2023-07-10T03:44:55.203997 | 2023-06-23T05:33:07 | 2023-06-23T05:33:07 | 11,670,641 | 203 | 61 | null | 2020-06-17T16:11:34 | 2013-07-25T20:47:22 | R | UTF-8 | R | false | false | 4,257 | r | variable_description.r | make.var.txt.from.df = function(df, cat=TRUE) {
  # Build a pipe-separated variable description table
  # ("orgvar | var | descr") from a data frame: one row per column,
  # preceded by a header row.
  restore.point("make.var.txt.from.df")
  var = colnames(df)
  # Default description is empty; prefer Stata-style variable labels
  # (attached e.g. by foreign::read.dta) when available.
  content = rep("", length(var))
  if (!is.null(attributes(df)$var.labels)) {
    content = attributes(df)$var.labels
  }
  names(content) = var
  # Fall back to the variable name itself where no label exists.
  empty = which(nchar(content)==0)
  content[empty] = var[empty]
  # NOTE(review): `orgvar` is assigned but never used, and the `cat`
  # argument is ignored in this function - confirm whether printing
  # was intended here.
  orgvar = var
  paste0(c("orgvar",var), " | ", c("var",var), " | ", c("descr", content))
}
# Build variable description text ("orgvar | var | descr" lines) for a set
# of data files.
#
# @param files files to process; if NULL, all files in `dir` whose
#   extension matches one of `ext` are used
# @param dir directory scanned when `files` is NULL
# @param ext file extensions to look for
# @param cat print the combined text when TRUE
# @param outfile optional path the combined text is written to
# @return the combined description text (one string), per-file blocks
#   separated by blank lines
make.var.txt.from.files = function(files=NULL, dir=getwd(), ext = c("dta","csv"), cat=TRUE, outfile=NULL) {
  restore.point("make.var.txt.from.files")
  if (is.null(files)) {
    ext.regexp = paste0("(\\.", ext, "$)", collapse = "|")
    files = list.files(path = dir, pattern = ext.regexp, full.names = TRUE)
  }
  # BUG FIX: make.var.txt.from.file() returns a character *vector* (one
  # line per variable). The original `paste0(lapply(...), collapse=...)`
  # deparsed each vector into a 'c("...", "...")' string. Collapse each
  # file's lines with "\n" first.
  txt = vapply(files, function(f) {
    paste0(make.var.txt.from.file(f, cat = FALSE), collapse = "\n")
  }, character(1))
  txt = paste0(txt, collapse = "\n\n")
  if (cat)
    cat(txt)
  if (!is.null(outfile)) {
    writeLines(txt, outfile)
  }
  txt
}
# Build the variable description lines for a single data file.
#
# The file is loaded according to its extension (Stata .dta files are read
# in full to preserve variable labels; text formats only need the header,
# so a single data row is read) and passed on to make.var.txt.from.df().
make.var.txt.from.file = function(file,...,cat=TRUE) {
  restore.point("make.var.txt.from.file")
  table.name = str.left.of(basename(file), ".")
  # Dispatch on the file extension.
  dat = if (str.ends.with(file, ".dta")) {
    read.dta(file)
  } else if (str.ends.with(file, ".csv")) {
    read.csv(file, ..., nrows = 1)
  } else {
    read.table(file, ..., nrows = 1)
  }
  make.var.txt.from.df(dat, cat = cat)
}
# Interactive developer examples for make.var.txt.from.files / read.var.txt.
# Uses hard-coded local Windows paths, so this is only meant to be run
# manually by the package author, not as part of the package.
examples.make.var.txt.from.files = function() {
txt.file = "D:/libraries/RTutor/examples/temp_shroudedvar.txt"
make.var.txt.from.files(dir="D:/libraries/RTutor/examples/shroudedfees", outfile = txt.file)
txt.file = "D:/libraries/RTutor/examples/shroudedvar.txt"
dt = read.var.txt(txt.file)
txt.file = "D:/libraries/RTutor/examples/bank runs variables.txt"
dt = read.var.txt(txt.file)
}
# Parse a '|'-separated variable description file.
#
# Lines that are blank or start with '#' are skipped as comments. Fields
# are whitespace-trimmed and duplicate (orgvar, var) pairs are dropped,
# keeping the first occurrence.
#
# @param txt.file path to the variable description text file
# @return a data.table with (at least) the columns orgvar, var and descr
read.var.txt = function(txt.file) {
  restore.point("read.var.txt")
  txt = str.trim(readLines(txt.file, warn=FALSE))
  # Drop comment ('#'-prefixed) and empty lines.
  ignore = str.starts.with(txt,"#") | nchar(txt)==0
  txt = txt[!ignore]
  # read.table(text=...) instead of an explicit textConnection(): the
  # original left the connection open (leaked until garbage collection).
  # A stray no-op `txt` expression was also removed here.
  dt = read.table(text = txt, sep="|", quote="", header=TRUE)
  dt = as.data.table(lapply(dt, str.trim))
  # Keep only the first occurrence of each (orgvar, var) combination.
  dupl = duplicated(dplyr::select(dt, orgvar, var))
  dt[!dupl,]
}
# Look up the descriptions of the given variables in the problem set's
# variable description table.
#
# @param vars character vector of variable names; defaults (lazily) to
#   colnames(dat), so either `vars` or `dat` must be supplied
# @param dat optional data frame whose column names are used when `vars`
#   is not given
# @param var.dt variable description table (columns var, descr); taken
#   from the current problem set by default
# @param no.descr placeholder text for variables without a description
# @return a data.frame with one row per element of `vars`, or NULL when
#   no description table is available
get.var.descr.dt = function(vars=colnames(dat),dat=NULL, var.dt = get.ps()$rps$var.dt, no.descr = "- no description -") {
restore.point("get.var.descr.dt")
if (is.null(var.dt))
return(NULL)
tab = data.frame(var=vars, stringsAsFactors =FALSE)
# Join descriptions by variable name; unmatched variables get NA descr,
# which is replaced by the placeholder below.
res = left_join(tab, as.data.frame(var.dt), by="var")
res$descr[is.na(res$descr)] = no.descr
res
}
# Render the variable descriptions as a markdown bullet list.
# All arguments are passed through to get.var.descr.dt().
get.var.descr.markdown = function(...) {
  descr = get.var.descr.dt(...)
  restore.point("get.var.descr.markdown")
  # One " - `var`: description" bullet per variable, blank-line separated.
  paste0(" - `", descr$var, "`: ", descr$descr, collapse = "\n\n")
}
# Render the variable descriptions as an HTML fragment.
# Arguments are passed through to get.var.descr.dt() (via the markdown
# renderer); the resulting bullet list is converted with markdownToHTML().
get.var.descr.html = function(...) {
txt = get.var.descr.markdown(...)
restore.point("get.var.descr.html")
html = markdownToHTML(text=txt)
html
}
# Translates variable names of a data frame
#
# Renames the columns of `df` from their original names (`orgvar`) to the
# short names (`var`) of the variable description provided with the
# problem set. Columns not listed in `orgvar` are left unchanged.
# NOTE(review): these are plain `#` comments, so the `@export` tag below
# is not picked up by roxygen -- presumably exported via NAMESPACE.
# @export
translate.var.names = function(df, orgvar=var.dt$orgvar, var=var.dt$var, var.dt = get.ps()$rps$var.dt) {
  restore.point("translate.var.names")
  # Scalar condition: use short-circuiting || (the original used the
  # vectorized |, which works here but is not idiomatic in `if`).
  if (is.null(orgvar) || is.null(var))
    return(df)
  # Positions of translatable names in the lookup vector and in df.
  ind = which(orgvar %in% names(df))
  col.ind = match(orgvar[ind], names(df))
  names(df)[col.ind] = var[ind]
  df
}
# Interactive developer example for translate.var.names.in.code().
# Hard-coded local paths; meant for manual use only.
examples.translate.var.names.in.code = function() {
file = "Bank Runs_sol.Rmd"
var.file = "D:/libraries/RTutor/examples/bank runs variables.txt"
translate.var.names.in.code(file=file, var.file=var.file)
}
# Tool to automatically translate variable names in a .r or .rmd file.
# Replaces every occurrence of an original variable name (orgvar) by its
# short name (var), as specified in the variable description file; useful
# to update existing scripts after variables were renamed.
#
# @param txt the code as a character vector; read from `file` by default
# @param file path of the code file (used for default txt and out.file)
# @param out.file where the translated code is written (NULL = don't write)
# @param var.file path of the variable description file
# @param backup.file where the original code is backed up (NULL = no backup)
# @return the translated code, invisibly
translate.var.names.in.code = function(
  txt=readLines(file, warn=FALSE), file=NULL, out.file=file, var.file,
  backup.file = paste0("backup__", sample.int(1e8,1),"__",file)
) {
  # Save a backup of the original code before any replacement is made.
  if (!is.null(backup.file)) {
    writeLines(txt, backup.file)
    display("created backup file: ", backup.file)
  }
  # BUG FIX: the original called read.var.txt(txt.file), but `txt.file` is
  # not defined in this function -- the description file argument is
  # `var.file`. (A stray no-op `txt` expression was removed as well.)
  var.dt = read.var.txt(var.file)
  if (is.null(var.dt$orgvar) || is.null(var.dt$var))
    return(invisible(txt))
  # Fixed-string replacement of each original name by its short name.
  for (i in seq_along(var.dt$orgvar)) {
    txt = gsub(var.dt$orgvar[i], var.dt$var[i], txt, fixed=TRUE)
  }
  if (!is.null(out.file)) {
    writeLines(txt, out.file)
    display("wrote to file: ", out.file)
  }
  invisible(txt)
}
e89501d4e4119c82f77872cdb670a8ff5e70d09b | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/assertive.matrices/examples/is_diagonal_matrix.Rd.R | dd5a298161833b46fa6a2aa042eb2bcfe23374f1 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 341 | r | is_diagonal_matrix.Rd.R | library(assertive.matrices)
### Name: assert_is_diagonal_matrix
### Title: Is the input a diagonal matrix?
### Aliases: assert_is_diagonal_matrix is_diagonal_matrix
### ** Examples
x <- diag(3)
is_diagonal_matrix(x)
x[1, 2] <- 100 * .Machine$double.eps
is_diagonal_matrix(x)
x[2, 3] <- 101 * .Machine$double.eps
is_diagonal_matrix(x)
|
b7c30d18f33ef1acc549eb1d1f7b33d45977e2ac | 01e473d07ba9e8353a22c39647d78c8eee272ec2 | /man/apply_attrition.Rd | ec8243420fdf91a7839546bbe99545d66d54b290 | [
"MIT"
] | permissive | bailliem/pharmavisR | 36bc8ca2c79a1ce361a57955aa1e64b6c50422dc | 3d0a1bf63c05543b9757096dc1fce0f4d9850dbe | refs/heads/master | 2023-07-20T21:56:18.811707 | 2022-08-19T06:01:46 | 2022-08-19T06:01:46 | 212,809,031 | 1 | 0 | null | 2019-10-04T12:23:17 | 2019-10-04T12:23:17 | null | UTF-8 | R | false | true | 1,005 | rd | apply_attrition.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apply_attrition.R
\name{apply_attrition}
\alias{apply_attrition}
\title{Apply list of inclusion/exclusion criteria to a patient-level dataframe}
\usage{
apply_attrition(data, criteria_conditions)
}
\arguments{
\item{data}{\code{data.frame}. Data set to be filtered}
\item{criteria_conditions}{\code{character} dplyr-filter compatible conditions
of the filtering criteria. These conditions will be applied to filter the input
data set and obtain final analysis data set}
}
\value{
Filtered data frame
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#questioning}{\figure{lifecycle-questioning.svg}{options: alt='[Questioning]'}}}{\strong{[Questioning]}}
Apply list of inclusion/exclusion criteria to a patient-level dataframe
}
\examples{
adtte_filtered <- visR::apply_attrition(adtte,
criteria_conditions = c(
"TRTP=='Placebo'", "AGE>=75",
"RACE=='WHITE'", "SITEID==709"
)
)
}
|
62d131d6c8f9467d456746e4097ffd249553f07e | 16b05f4c1440d8bacdca6fc1b1d2cf71db6a9f63 | /download/download_data.R | fb967013692c4447e4cd67ac566114c17ef4c8b1 | [] | no_license | stats4good/veraz_files | 55f84c4b42bb950d8dd0191a138216ea7055525a | 3cdd14dc94c0a8fd21d8773657912f4c4230879b | refs/heads/master | 2020-03-08T10:27:20.803194 | 2018-08-30T23:54:47 | 2018-08-30T23:54:47 | 128,073,804 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,580 | r | download_data.R | download_perfil <- function (dir = NULL, sec, subsec, subsubsec, year, month, day = NULL)
{
# Body of download_perfil (the argument list is on the preceding line):
# downloads one monthly ZIP from the Brazilian transparency portal for a
# given sec/subsec/subsubsec, unpacks it into a per-year directory and
# returns a one-row data.frame reporting whether the link was valid.
# (Original Portuguese comments, mojibake-encoded, translated to English.)
# Initial validity checks on the requested date:
today <- c(format(Sys.Date(), "%Y"), format(Sys.Date(), "%m"), format(Sys.Date(), "%d"))
if (any(year < 2011 | year > as.numeric(today[1]))) stop("This Date is not available")
if (any(month < 1 | month > 12)) stop("This Date is not available")
if(is.null(dir)) dir <- getwd()
# Variable transformation:
year <- as.character(year)
month <- sprintf("%02d", month)
# The portal's "consulta" name for Pagamentos downloads is the plain
# sub-section name, so rewrite subsubsec for those two cases.
if (subsec == 'GastosDiretos' && subsubsec == 'Pagamentos_GastosDiretos') subsubsec <- 'GastosDiretos'
if (subsec == 'Transferencias' && subsubsec == 'Pagamentos_Transferencias') subsubsec <- 'Transferencias'
# Build the download link:
link <- "http://arquivos.portaldatransparencia.gov.br/downloads.asp?a=%s&m=%s&consulta=%s"
link_down = sprintf(fmt = link, year, month, subsubsec)
# Download and store the data:
file_name = paste(year, month, subsec, subsubsec, sep = "_")
file_direc = paste(dir, "/", file_name, ".zip", sep = "")
# Content-Type of a HEAD request tells us whether a file exists at the URL.
link_test = httr::HEAD(link_down)$headers[[3]]
dir_sec = paste(dir, "/", sec, sep = "")
dir_subsec = paste(dir_sec, "/", subsec, sep = "")
# NOTE(review): subsubsec was already rewritten above for the Pagamentos
# cases, so this condition appears to never be TRUE here -- confirm.
if ( (subsec == 'GastosDiretos' && subsubsec == 'Pagamentos_GastosDiretos') ||
(subsec == 'Transferencias' && subsubsec == 'Pagamentos_Transferencias') )
{
dir_subsubsec = paste(dir_subsec, "/", 'Pagamentos', sep = "")
}else{
dir_subsubsec = paste(dir_subsec, "/", subsubsec, sep = "")
}
dir_year = paste(dir_subsubsec, "/", year, sep = "")
if (link_test == "application/x-download")
{
download.file(url = link_down, destfile = file_direc, mode = "wb")
# NOTE(review): dir_sec itself is never created, and the subdirectories
# are only created when dir_sec does NOT yet exist -- confirm intended.
if(!dir.exists(dir_sec))
{
if (!dir.exists(dir_subsec))
{
dir.create(dir_subsec)
if (!dir.exists(dir_subsubsec))
{
dir.create(dir_subsubsec)
}
if (!dir.exists(dir_year))
{
dir.create(dir_year)
}
}
}
unzip(zipfile = file_direc, exdir = dir_year)
# Remove the ZIP once its contents have been extracted.
unlink(file_direc)
# link_error = 'F': the download link was valid.
data.frame(dir = file_direc, link = link_down, link_error = 'F')
}else{
# link_error = 'T': no downloadable file behind the link.
data.frame(dir = file_direc, link = link_down, link_error = 'T')
}
}
# Build the download "profile": every requested combination of section,
# sub-section, sub-sub-section, year and month, returned as one tbl_df.
perfil <- function (secs, subsecs, subsubsecs, years, months) {
  # One expand.grid() per sub-section, since the valid sub-sub-sections
  # depend on the sub-section (looked up in the `subsubsecs` list).
  grids <- lapply(seq_along(subsecs), function(i) {
    expand.grid(sec = secs,
                subsec = subsecs[i],
                subsubsec = subsubsecs[[subsecs[i]]],
                year = years,
                month = months)
  })
  grids %>%
    dplyr::bind_rows() %>%
    dplyr::tbl_df()
}
# Function to run the downloads recursively over all requested profiles.
# Bulk downloader for the Brazilian transparency portal: builds all
# (sec, subsec, subsubsec, year, month) combinations via perfil() and
# downloads each with download_perfil(), skipping future months.
# (Original Portuguese comments, mojibake-encoded, translated to English.)
download_portal_trans <- function (dir = NULL,
secs = 'Despesas',
subsecs = c('GastosDiretos', 'Transferencias', 'ProgramasSociais'),
subsubsecs = list('GastosDiretos' = c('Pagamentos_GastosDiretos', 'Diarias', 'CPGF', 'CPGFComprasCentralizadas', 'FavorecidosGastosDiretos', 'ConsorcioGD'),
'Transferencias' = c('Pagamentos_Transferencias', 'CPDC', 'FavorecidosTransferencias', 'ConsorcioTR'),
'ProgramasSociais' = c('BolsaFamiliaFolhaPagamento', 'BolsaFamiliaSacado', 'SeguroDefeso', 'OutrasTransferenciassCidadao')),
years = 2011:2017,
months = 1:12,
days = NULL)
{
# Create the directory the downloaded files are saved to:
if (is.null(dir)) {
dir <- paste(getwd(), '/data_portal_trans', sep = '')
dir.create(dir)
} else {
dir <- paste(dir, '/data_portal_trans', sep = '')
dir.create(dir)
}
# Wrapper so a failed download yields an empty data frame instead of
# aborting the whole run.
# NOTE(review): dplyr::failwith() and dplyr::data_frame() are deprecated
# in current dplyr -- confirm the installed version still provides them.
f <- dplyr::failwith(dplyr::data_frame(), download_perfil)
# Build all download profiles and run the downloads:
perfil(secs, subsecs, subsubsecs, years, months) %>%
# Skip year/month combinations that have not happened yet.
dplyr::filter(!(year >= as.numeric(format(Sys.Date(), "%Y")) & month >= as.numeric(format(Sys.Date(), "%m")))) %>%
dplyr::group_by(sec,
subsec,
subsubsec,
year,
month) %>%
dplyr::do(f(dir = dir,
.$sec,
.$subsec,
.$subsubsec,
.$year,
.$month)) %>%
dplyr::ungroup() %>%
dplyr::tbl_df()
}
|
aa67a4e3a03e567b894991bc2530d1804906a584 | 9532a3053f6faf961ac3b2dd23aa90eece38f4cd | /man/list_members.Rd | 2dfd0659ff244867edbd93258729c5f54124ad0b | [] | no_license | luke-a/rtweet | fc4912c81f57d4d6e05f74ad07a072fb1926b210 | 13b3fa7a892bb25c6937d8939846a8dcf1b23d2a | refs/heads/master | 2021-01-23T12:06:30.908231 | 2017-10-28T12:09:00 | 2017-10-28T12:09:00 | 102,648,243 | 0 | 0 | null | 2017-09-06T19:13:17 | 2017-09-06T19:13:17 | null | UTF-8 | R | false | true | 1,510 | rd | list_members.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/list-members.R
\name{list_members}
\alias{list_members}
\title{GET lists/members}
\usage{
list_members(list_id, slug = NULL, owner_user = NULL, count = 5000,
cursor = "-1", token = NULL)
}
\arguments{
\item{list_id}{required The numerical id of the list.}
\item{slug}{required You can identify a list by its slug instead of
its numerical id. If you decide to do so, note that you'll also
have to specify the list owner using the owner_id or
owner_user parameters.}
\item{owner_user}{optional The screen name or user ID of the user
who owns the list being requested by a slug.}
\item{count}{optional Specifies the number of results to return
per page (see cursor below). The default is 20, with a maximum
of 5,000.}
\item{cursor}{semi-optional Causes the collection of list members
to be broken into "pages" of consistent sizes (specified by
the count parameter). If no cursor is provided, a
value of -1 will be assumed, which is the first "page."
The response from the API will include a previous_cursor
and next_cursor to allow paging back and forth. See Using
cursors to navigate collections for more information.}
\item{token}{OAuth token. By default \code{token = NULL} fetches a
non-exhausted token from an environment variable. Find
instructions on how to create tokens and setup an environment
variable in the tokens vignette (in r, send \code{?tokens} to
console).}
}
\value{
data
}
\description{
GET lists/members
}
|
b60e495ab3c95581db24a7092e9a24ad4c1ce573 | 9453e9af1c3914b730df7737511ef970a45ad1f7 | /man/breakEdges-methods.Rd | 7d31cc51227985d62d1f80ba79c413af20ce3948 | [] | no_license | rmflight/categoryCompare | 1ce09c3ffb9ef7d5810222425419c59a2f51bca8 | 1accf65ec434a4b0a25e87aa3e3586f1085da396 | refs/heads/master | 2021-01-10T19:27:44.064062 | 2018-04-30T14:41:38 | 2018-04-30T14:41:38 | 4,077,158 | 3 | 2 | null | 2015-05-13T20:20:44 | 2012-04-19T15:58:52 | R | UTF-8 | R | false | false | 1,602 | rd | breakEdges-methods.Rd | \name{breakEdges-methods}
\docType{methods}
\alias{breakEdges}
\alias{breakEdges-methods}
\alias{breakEdges,ccCompareResult,numeric-method}
\alias{breakEdges,numeric,numeric-method}
\title{ Methods for Function \code{breakEdges} in Package \pkg{categoryCompare} }
\description{
Methods for function \code{breakEdges} in package \pkg{categoryCompare}
}
\section{Methods}{
\describe{
\item{\code{signature(cwObject = "ccCompareResult", cutoff = "numeric")}}{
Allows one to remove edges in the \code{ccCompareResult} \code{mainGraph} slot prior to passing it into Cytoscape for visualization. Given that the number of edges can be rather large (especially for Gene Ontology) this can easily speed up the transfer, without actually losing any information.
}
\item{\code{signature(cwObject = "numeric", cutoff = "numeric")}}{
Once an annotation graph is in Cytoscape, remove edges above or below the cutoff. Note that this does not affect the original graph in the \code{ccCompareResult} object.
}
}}
\author{
Robert M Flight
}
\seealso{
\code{\link{breakEdges}} \code{\linkS4class{ccCompareResult}} \code{\link{ccOutCyt}}
}
\examples{
data(ccData)
# breaking the edges in a ccCompareResult
ccResults$BP <- breakEdges(ccResults$BP, 0.8)
\dontrun{
hasCy <- (if (.Platform$OS.type \%in\% "windows") {
(length(grep("Cytoscape", system("tasklist", intern=TRUE))) > 0)})
if (hasCy) {
cwObj <- ccOutCyt(ccResults$BP,ccOpts)
# now breaking them in the CytoscapeWindow object
breakEdges(cwObj, 0.85)
Sys.sleep(10)
RCy3::deleteWindow(cwObj)
}
}}
\keyword{methods}
\keyword{ other possible keyword(s) }
|
8fce94eb1deedb517c2f69d6d64da2b4b4c6264e | d9f2ebf9eb28c5d698fd5d00965ceaa2657d91d0 | /man/is_case_weights.Rd | 03fdf32e8d2bd086046ddee7a7cd82d63bb7eee9 | [
"MIT"
] | permissive | tidymodels/hardhat | 6c813c76e314d97966304872b3b5074bffc7a0b1 | ac2dfd06e52bed75e4bca35fa1df0d93f3575a85 | refs/heads/main | 2023-06-22T12:18:11.297361 | 2023-03-30T12:27:21 | 2023-03-30T12:27:21 | 170,162,478 | 95 | 13 | NOASSERTION | 2023-03-29T13:52:03 | 2019-02-11T16:31:00 | R | UTF-8 | R | false | true | 648 | rd | is_case_weights.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/case-weights.R
\name{is_case_weights}
\alias{is_case_weights}
\title{Is \code{x} a case weights vector?}
\usage{
is_case_weights(x)
}
\arguments{
\item{x}{An object.}
}
\value{
A single \code{TRUE} or \code{FALSE}.
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#experimental}{\figure{lifecycle-experimental.svg}{options: alt='[Experimental]'}}}{\strong{[Experimental]}}
\code{is_case_weights()} checks if \code{x} inherits from \code{"hardhat_case_weights"}.
}
\examples{
is_case_weights(1)
is_case_weights(frequency_weights(1))
}
|
e8703521cb058f7465eafb428028f0ebf0d4ec85 | b4e334a51917f17b4e06a24495c029b7be22ce51 | /man/calc_taxa_means.Rd | 0084aefe1957aa394dca6646b5e4926d21ff32dc | [] | no_license | jdiaz4302/mctoolsr | 2615e31045eda693d2d42440b76e965bcabfced2 | 4cb0db0b4f028452026f8b8e38e73f50f64d542d | refs/heads/master | 2020-12-13T18:25:48.324268 | 2016-03-31T01:53:45 | 2016-03-31T01:54:19 | 55,293,111 | 0 | 0 | null | 2016-04-02T12:23:25 | 2016-04-02T12:23:25 | null | UTF-8 | R | false | false | 851 | rd | calc_taxa_means.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/normalization.R
\name{calc_taxa_means}
\alias{calc_taxa_means}
\title{Calculate mean taxa values across a specified factor}
\usage{
calc_taxa_means(input, summarize_by_factor, metadata_map)
}
\arguments{
\item{input}{The input dataset as loaded by \code{load_taxa_table()} or
an otu table of class \code{data.frame}.}
\item{summarize_by_factor}{Category in mapping file to summarize by.}
\item{metadata_map}{[Optional]. The metadata mapping data frame. Required if
input is a \code{data.frame}.}
}
\value{
If input is a list, returns a list with a taxon table (data_loaded)
and a mapping data frame (map_loaded). It will automatically return
taxonomy in the list if provided in the input.
}
\description{
Calculate mean taxa values across a specified factor
}
|
91c79389b7a4dd3695ac1ae25911790bbf767364 | b280969ad6a27fbfac95ee5978acb0bfa3a9ece5 | /man/clean.Rd | 5ca50e3798005ed40bfb64b659bf0c78245c31b2 | [] | no_license | valeriastay/quanteda | a8029553edb4c093786e35c472a3bd18f740b842 | f61275be77f5ed2a8f96d80231c3ef1665b13394 | refs/heads/master | 2020-04-06T06:26:26.583457 | 2015-07-23T07:36:05 | 2015-07-23T07:36:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,674 | rd | clean.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/clean.R
\name{clean}
\alias{clean}
\alias{clean.character}
\alias{clean.corpus}
\alias{cleanC}
\title{simple cleaning of text before processing}
\usage{
clean(x, ...)
\method{clean}{character}(x, removeDigits = TRUE, removePunct = TRUE,
toLower = TRUE, removeAdditional = NULL, removeTwitter = FALSE,
removeURL = TRUE, ...)
\method{clean}{corpus}(x, removeDigits = TRUE, removePunct = TRUE,
toLower = TRUE, removeAdditional = NULL, removeTwitter = FALSE, ...)
cleanC(x, removeDigits = TRUE, removePunct = TRUE, toLower = TRUE,
removeAdditional = NULL, removeTwitter = FALSE, removeURL = TRUE, ...)
}
\arguments{
\item{x}{The object to be cleaned. Can be either a character vector or a
corpus containing texts}
\item{...}{additional parameters}
\item{removeDigits}{remove numbers if \code{TRUE}}
\item{removePunct}{remove punctuation if \code{TRUE}}
\item{toLower}{convert text to lower case \code{TRUE}}
\item{removeAdditional}{additional characters to remove (\link[=regex]{regular expression})}
\item{removeTwitter}{if \code{FALSE}, do not remove \code{@} or \code{#}}
\item{removeURL}{removes URLs (web addresses starting with \code{http:} or \code{https:}), based
on a regular expression from \url{http://daringfireball.net/2010/07/improved_regex_for_matching_urls}}
}
\value{
A character vector equal in length to the original texts (supplied or in the corpus)
after cleaning.
}
\description{
\code{clean} is an older function used for pre-processing text, but now replaced by similar functionality in
\code{\link{tokenize}}. Please use that function instead.
}
|
cc2f87b12d9db659d173ca60905b60f3f776a43c | f22407c2401719a7e55cc2fae296b301df0f032b | /Assginment-Text-Mining-Twitter.R | 959f0e020bfaee06fdbd1099def2e59ae1a56ca4 | [] | no_license | AnuradhaNagar/DS_Rcode | c813ba1cad264ad9e44c1604bf59283776d929c6 | b385933b8ef67b0514e19ae08fbc2e1553c7d522 | refs/heads/main | 2023-01-08T02:31:07.334382 | 2020-11-07T12:06:01 | 2020-11-07T12:06:01 | 310,835,452 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,740 | r | Assginment-Text-Mining-Twitter.R | library(tm)
# Twitter text-mining assignment: pull tweets, clean them with tm, draw
# word clouds and run a sentence-level sentiment analysis with syuzhet.
library(readr)
require(graphics)
library("ROAuth")
library("twitteR")
# SECURITY NOTE(review): OAuth consumer/access keys are hard-coded below
# (only partially masked). Credentials must not live in source code --
# move them to environment variables or a local config file, and revoke
# the exposed tokens.
cred <- OAuthFactory$new(consumerKey='2NFXXXXXXXXBb2Gdar',
consumerSecret='Z6h7aXXXXXXXX04kWznH0XrZ638PdM6Ix',
requestURL='https://api.twitter.com/oauth/request_token',
accessURL='https://api.twitter.com/oauth/access_token',
authURL='https://api.twitter.com/oauth/authorize')
setup_twitter_oauth("eOtcK9qJpjcObGUPv6SfTFZFE",
"TedGta8dDigiQcpKJ68AleIlfaHL7nqdVdCH2qppgD7wLYE0Tm",
"896758586243653632-6RCrl6Yia8uR0YbjBor38RxbjCWCqFm",
"QYLajXrqdbMYFKwno9itPNo4LVPsVuccOBSyxzfMfdCUF")
# Fetch the 10 most recent timeline tweets of the account.
Tweets <- userTimeline('narendramodi', n = 10)
#View(Tweets)
TweetsDF <- twListToDF(Tweets)
str(TweetsDF)
View(TweetsDF)
#Data<-head(TweetsDF$text)
##Data<-TweetsDF$text
#View(Data)
#write.table(Data,"modi1.txt")
#getwd()
#text<-readLines("modi1.txt", skipNul = TRUE)
#View(text)
# Build a tm corpus from the tweet texts.
corpus <- Corpus(VectorSource(TweetsDF$text))
# clean up the corpus using tm_map(): lower-case, strip numbers,
# stopwords, punctuation and extra whitespace.
corpus_clean <- tm_map(corpus, (tolower))
inspect(corpus_clean)
corpus_clean <- tm_map(corpus_clean, removeNumbers)
stopwords("english")
corpus_clean <- tm_map(corpus_clean, removeWords, stopwords())
inspect(corpus_clean)
corpus_clean <- tm_map(corpus_clean, removePunctuation)
#inspect(corpus_clean)
corpus_clean <- tm_map(corpus_clean, stripWhitespace)
inspect(corpus_clean)
# Term-document matrix and word frequencies for the word clouds.
dtm <- TermDocumentMatrix(corpus_clean,control = list(minWordLength=c(1,Inf)))
findFreqTerms(dtm, lowfreq = 2)
m<-as.matrix(dtm)
wordFreq <- sort(rowSums(m), decreasing=TRUE)
# NOTE(review): install.packages() inside a script re-installs on every
# run; normally done once, interactively.
install.packages("wordcloud")
library(wordcloud)
wordcloud(words=names(wordFreq), freq=wordFreq, min.freq = 10, random.order = F, col=gray.colors(1))
wordcloud(words=names(wordFreq), freq=wordFreq, min.freq = 10, random.order = F, colors=rainbow(20))
# Interactively pick positive/negative word lists (';' lines are comments).
pos=scan(file.choose(), what="character", comment.char=";")
neg=scan(file.choose(), what="character", comment.char=";")
#Sentiment analysis
library("syuzhet")
# Split tweets into sentences and score them with several lexicons
# (bing, afinn, nrc).
s_v <- get_sentences(TweetsDF$text)
class(s_v)
str(s_v)
(s_v)
sentiment_vector <- get_sentiment(s_v, method = "bing")
head(sentiment_vector)
sentiment_vector
afinn_s_v <- get_sentiment(s_v, method = "afinn")
head(afinn_s_v)
nrc_vector <- get_sentiment(s_v, method="nrc")
head(nrc_vector)
sentiment_vector
sum(sentiment_vector)
mean(sentiment_vector)
summary(sentiment_vector)
# To extract the sentence with the most negative emotional valence
negative <- s_v[which.min(sentiment_vector)]
negative
# and to extract the most positive sentence
positive <- s_v[which.max(sentiment_vector)]
positive
71f3ad7cf6f1d767d5d1b3bb2da4d0ec0d8b375e | ef3c8b0609b38ab0388babe411eb4eccaa4d94b4 | /HarmJanMatrix.R | 39c8fc40bf14e44bb8fec314dd720d5edb567ded | [] | no_license | DannyArends/ludeCode | d540c1a1d3e9373fe2beaf69a80390c483eb01c0 | ff109c9fd5fa4204805e3ea672c749189d6ed670 | refs/heads/master | 2021-01-10T19:36:54.675900 | 2013-07-19T13:07:05 | 2013-07-19T13:07:05 | 10,618,682 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,121 | r | HarmJanMatrix.R | setwd("~/Github/Juha/")
# Correlate cell-type T-scores with endophenotype/expression correlations.
# Reads both matrices, restricts them to their shared samples (row names),
# computes Spearman correlations, writes the correlation table and one
# scatter plot PNG per T-score / endophenotype pair.
ExpCorMatrixHJ <- read.csv("EGCUTEndophenotypesValidSamples-NoSex.txt-asLoadedByNormalizerEndophenotypeVsExpressionCorrelationMatrix-Ensembl.txt", sep='\t', row.names=1)
TScores <- read.csv("TScores5CellTypes.txt", row.names=1)
# BUG FIX: the original referenced the undefined object `Tscores`
# (lower-case s); R is case-sensitive, so this line always errored.
ExpCorMatrixHJ <- ExpCorMatrixHJ[which(rownames(ExpCorMatrixHJ) %in% rownames(TScores)),]
TScores <- TScores[which(rownames(TScores) %in% rownames(ExpCorMatrixHJ)),]
# Align the endophenotype rows with the T-score rows before binding.
sortHJ <- match(rownames(TScores), rownames(ExpCorMatrixHJ))
bound <- cbind(ExpCorMatrixHJ[sortHJ,], TScores)
corrs <- cor(bound, method="spearman")
# Tag the five T-score rows so they are distinguishable in the output.
rownames(corrs)[20:24] <- paste0(rownames(corrs)[20:24], "_Danny")
write.table(corrs, file="Tscores_vs_EGCUTEndophenotypesValidSamples.txt", sep='\t', quote=FALSE)
namez <- colnames(bound)
# One scatter plot per T-score (cols 20:24) x endophenotype (cols 10:14).
for (x in 20:24) {
  for (y in 10:14) {
    xlab = paste0("Tscore ", namez[x])
    ylab = paste0("Endophenotype ", namez[y])
    # `digits` spelled out (the original relied on partial matching: d=2).
    main = paste0(namez[x], " ", namez[y], ", Cor: ", round(corrs[x, y], digits = 2))
    png(file = paste0(namez[x], "_", namez[y], ".png"), width = 1024, height = 800)
    plot(bound[, x], bound[, y], xlab = xlab, ylab = ylab, main = main, pch = 19, cex = 0.6)
    dev.off()
  }
}
2eb757326975de2a05885dc0ebf801b7639d0bc1 | 8f9501ff4cf822ae70daca3fff81139dead6f4b9 | /Hypothesis2.R | 355956544cf203e5430f76a40d2398cd43824c46 | [] | no_license | Sharath2596/Hypothesis | 579c2af404fa29de4c2f189080223c898e089ba5 | 050041392b2459c086966d6f7cc9a91c654c7800 | refs/heads/main | 2023-08-09T20:30:45.802291 | 2023-07-24T11:41:38 | 2023-07-24T11:41:38 | 338,017,093 | 0 | 0 | null | 2023-07-24T11:25:24 | 2021-02-11T12:02:49 | Jupyter Notebook | UTF-8 | R | false | false | 1,803 | r | Hypothesis2.R | # A hospital wants to determine whether there is any difference in the average Turn Around Time (TAT) of reports of the laboratories on their preferred list. They collected a random sample and recorded TAT for reports of 4 laboratories. TAT is defined as sample collected to report dispatch.
# Analyze the data and determine whether there is any difference in average TAT among the different laboratories at 5% significance level.
library(readr)
LabTAT <- read_csv("LabTAT.csv")
View(LabTAT)
# NOTE(review): attach() is discouraged -- it copies columns into the
# search path and can silently mask other objects.
attach(LabTAT)
# Normality test (Shapiro-Wilk) per laboratory:
shapiro.test(`Laboratory 1`)
# p-value 0.5508 > 0.05, so the null hypothesis (normality) is retained
# It follows a normal distribution
shapiro.test(`Laboratory 2`)
# p-value 0.8637 > 0.05, so the null hypothesis (normality) is retained
# It follows a normal distribution
shapiro.test(`Laboratory 3`)
# p-value 0.4205 > 0.05, so the null hypothesis (normality) is retained
# It follows a normal distribution
shapiro.test(`Laboratory 4`)
# p-value 0.6619 > 0.05, so the null hypothesis (normality) is retained
# It follows a normal distribution
# NOTE(review): install.packages() in a script re-installs on every run.
install.packages("car")
library(car)
# Q-Q plots as a visual normality check:
qqPlot(`Laboratory 1`)
qqPlot(`Laboratory 2`)
qqPlot(`Laboratory 3`)
qqPlot(`Laboratory 4`)
# Variance test (pairwise F-tests for equality of variances):
var.test(`Laboratory 1`, `Laboratory 2`)
var.test(`Laboratory 2`, `Laboratory 3`)
var.test(`Laboratory 3`, `Laboratory 4`)
var.test(`Laboratory 4`, `Laboratory 1`)
# p-value for all 4 comparisons > 0.05, so the null hypothesis is retained
# Equal variances
# ANOVA test
# Null hypothesis: the average TAT of all laboratories is the same
# Alternative hypothesis: at least one laboratory's average TAT differs
stacked_data <- stack(LabTAT)
View(stacked_data)
attach(stacked_data)
# One-way ANOVA of TAT values by laboratory:
Anova_result <- aov(values~ind, data = stacked_data)
summary(Anova_result)
# p-value is less than 0.05
# reject the null, i.e. accept the alternative hypothesis
# Hence the average TAT of at least one laboratory is different
23ccabb35bbdebabc975bf88b28db59a5e75ab32 | 0217dd9843c9462108ebf43d5b5f6feff1bbb65c | /man/api_get_cohorts.Rd | e702c47bed4c4ca44625c3ac2333ba6a55cd20c9 | [] | no_license | aneuraz/DWHtools2 | 44f833f07dc15735d4134e57ce119a90949ec38c | 4c3eb585c331c680b4212352215708a763a54c71 | refs/heads/master | 2021-03-30T17:20:43.931314 | 2019-09-06T09:55:33 | 2019-09-06T09:55:33 | 90,238,636 | 0 | 1 | null | 2019-09-06T09:54:38 | 2017-05-04T08:18:04 | R | UTF-8 | R | false | true | 241 | rd | api_get_cohorts.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dwh_api_client.R
\name{api_get_cohorts}
\alias{api_get_cohorts}
\title{api_get_cohorts}
\usage{
api_get_cohorts(base_url, token)
}
\description{
api_get_cohorts
}
|
cac85ca4865de20f0f1ca98ee05d88aa4f82698b | dab7e577d31db1854b831c8c9bdfa9d1f9f5345b | /exercise-2/ui.R | 7a629d4a0372aa422c7366b04a72c94581bea152 | [
"MIT"
] | permissive | ZKeiLin/m18-shiny | 7656def03e0be6154e37bc7db9beb9922a8272e9 | 00577b1a11355a4f8d0bcdc23835027faac7bf4d | refs/heads/master | 2020-12-30T16:45:41.639321 | 2017-05-11T22:18:23 | 2017-05-11T22:18:23 | 91,021,940 | 0 | 0 | null | 2017-05-11T20:45:10 | 2017-05-11T20:45:10 | null | UTF-8 | R | false | false | 492 | r | ui.R | # UI for scatterplot
# Shiny UI for a scatterplot exercise.
# Create a shinyUI with a fluidPage layout
shinyUI(fluidPage(
  # Add a numericInput to choose the number of observations
  # NOTE(review): the scaffold comment asks for a numeric input, but the
  # code below uses a selectInput with fixed choices -- confirm intended.
  selectInput("# Observations", label = h3("100"),
              choices = list("100" = 1, "200" = 2, "300" = 3),
              selected = 1),
  hr(),
  fluidRow(column(3, verbatimTextOutput("100")))
  # Add a selectInput that allows you to select a color from a list of choices
  # Plot the output with the name "scatter"
  # NOTE(review): no plotOutput("scatter") or color selectInput exists yet;
  # the two comments above appear to be unfinished exercise steps.
))
1800f1ca55e62325e0a9516ae48916e9fa267280 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /ggforce/inst/testfiles/enclose_points/libFuzzer_enclose_points/enclose_points_valgrind_files/1610030994-test.R | 90bee3084ceb83098240c5092a1575abcfb1c90d | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 309 | r | 1610030994-test.R | testlist <- list(id = integer(0), x = c(1.34497461904945e-284, 4.82194077500418e+233, 2.29982995267468e-312, 2.12199575391035e-314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y = numeric(0))
# Replay the fuzzer-generated argument list against the internal C++ entry
# point and print the result's structure (crash/regression check).
result <- do.call(ggforce:::enclose_points,testlist)
str(result)
6b20dd357b438c02f74928634bec7c0c627ed6b7 | a0aa511780814e67695203dec46022eda1c75467 | /man/chclass.Rd | 3ee16c430190b4887a4f2babdf78ce6075b09177 | [] | no_license | leonpheng/xptdefine | ab031216806bbb22e8fbf7b5b56ac5fcfba726a3 | 62388d9927436fac5df9a05305bf6bffc2124e56 | refs/heads/master | 2020-04-05T02:29:19.060784 | 2019-11-19T15:39:29 | 2019-11-19T15:39:29 | 114,052,833 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 261 | rd | chclass.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\name{chclass}
\alias{chclass}
\title{chclass}
\usage{
chclass(data, var, class = "char")
}
\description{
Internally used helper function.
}
\examples{
chclass()
}
\keyword{chclass}
|
984e5fd5eb11dfe149d74530289ee443318d48f9 | 9dce2b270982265965679dda60996f5130eb1442 | /Pocket_Nutritionist_Better_More_Interactive/server.R | 1fe8435c1ff3e34ceabc4c994174ab7f3e51a770 | [] | no_license | matthew-boccio/Pocket_Nutritionist_2 | 8c70fcf5c52b2ca82fa3b67ab90082a550a3f7df | d62b2abad102f65273eb4ae603613227f6029706 | refs/heads/master | 2022-05-25T12:50:48.762901 | 2020-04-28T19:59:07 | 2020-04-28T19:59:07 | 259,152,759 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,800 | r | server.R | function(input, output, session) {
food_choice = reactive({
food_full %>%
filter(Food == input$Food) %>%
filter(Type == input$Type) %>%
filter(Preparation == input$Preparation) %>%
filter(Additions == input$Additions)
})
output$Food_Pick = renderUI({
selectizeInput("Food", label = h4("Select Food"),
choices = c("choose" = "", unique(food_full$Food)))
})
output$Type_Pick = renderUI({
type_depend = reactive({
food_full %>%
filter(Food == input$Food) %>%
pull(Type) %>%
as.character() })
selectizeInput("Type", label = h4("Select Type"),
choices = c("choose" = "", type_depend()))
})
output$Prep_Pick = renderUI({
prep_depend = reactive ({
food_full %>%
filter(Food == input$Food) %>%
filter(Type == input$Type) %>%
pull(Preparation) %>%
as.character() })
selectizeInput("Preparation", label = h4("Select Preparation"),
choices = c("choose" = "", prep_depend()))
})
output$Add_Pick = renderUI({
add_depend = reactive ({
food_full %>%
filter(Food == input$Food) %>%
filter(Type == input$Type) %>%
filter(Preparation == input$Preparation) %>%
pull(Additions) %>%
as.character() })
selectizeInput("Additions", label = h4("Select Additions"),
choices = c("choose" = "", add_depend()))
})
food_choice_for_compare = reactive({
food_choose = food_full %>%
filter(Food == input$Food
& Type == input$Type
& Preparation == input$Preparation
& Additions == input$Additions) %>%
group_by(Food) %>%
summarize(Avg_Total_Calories_per_100g = round(mean(Calories)),
Calories_of_Carbs = round(mean(Total_Carbs_.g.) * 4),
Calories_of_Sugar = round(mean(Total_Sugar_.g.) * 4),
Calories_of_Fat = round(mean(Total_Fat_.g.) * 9),
Calories_of_Saturated_Fat = round(mean(Saturated_Fat_.g.) * 9),
Calories_of_Protein = round(mean(Protein_.g.) * 4)) %>%
mutate(Food = "SELECTED FOOD")
add_choose_to_compare = rbind(food_compare_base, food_choose)
comparer = add_choose_to_compare %>%
filter(Food == input$Food
| Food == "SELECTED FOOD")
comparer = comparer %>%
gather(Avg_Total_Calories_per_100g,
Calories_of_Carbs,
Calories_of_Sugar,
Calories_of_Fat,
Calories_of_Saturated_Fat,
Calories_of_Protein,
key = "Calorie_Type", value = "Calorie_Amount")
})
# Dodged horizontal bar chart comparing the selected food against its
# baseline, one bar pair per calorie type (y axis capped at 900 kcal).
output$compfood <- renderPlot({
  food_choice_for_compare() %>%
    ggplot(aes(x = Calorie_Type, y = Calorie_Amount)) +
    geom_col(aes(fill = Food), position = "dodge") +
    coord_flip() +
    ylim(0, 900)
})
#----------------------------------------------------------
# Rows of food_full matching all four picker inputs exactly.
# NOTE(review): this reactive does not appear to be called anywhere in
# this section of the server — confirm against the rest of the file
# before removing it.
food_choice_2 <- reactive({
  food_full %>%
    filter(Food == input$Food,
           Type == input$Type,
           Preparation == input$Preparation,
           Additions == input$Additions)
})
# --- Food picker cascade (second selector set, "_2" outputs) ----------------
# NOTE(review): these renderUI outputs reuse the same input IDs ("Food",
# "Type", "Preparation", "Additions") as the first picker set. Shiny input
# IDs must be unique within a session; if both selector sets are ever
# rendered at the same time this will misbehave — confirm the two sets
# live on mutually exclusive tabs, or give this set distinct IDs.

# Top-level food selector, populated with every distinct food on record.
output$Food_Pick_2 <- renderUI({
  selectizeInput("Food", label = h4("Select Food"),
                 choices = c("choose" = "", unique(food_full$Food)))
})

# Type selector, restricted to types observed for the chosen food.
output$Type_Pick_2 <- renderUI({
  matching_types <- food_full %>%
    filter(Food == input$Food) %>%
    pull(Type) %>%
    as.character()
  selectizeInput("Type", label = h4("Select Type"),
                 choices = c("choose" = "", matching_types))
})

# Preparation selector, restricted to the chosen food + type.
output$Prep_Pick_2 <- renderUI({
  matching_preps <- food_full %>%
    filter(Food == input$Food, Type == input$Type) %>%
    pull(Preparation) %>%
    as.character()
  selectizeInput("Preparation", label = h4("Select Preparation"),
                 choices = c("choose" = "", matching_preps))
})

# Additions selector, restricted to the chosen food + type + preparation.
output$Add_Pick_2 <- renderUI({
  matching_adds <- food_full %>%
    filter(Food == input$Food,
           Type == input$Type,
           Preparation == input$Preparation) %>%
    pull(Additions) %>%
    as.character()
  selectizeInput("Additions", label = h4("Select Additions"),
                 choices = c("choose" = "", matching_adds))
})
# Reactive: data behind the single-food calorie-breakdown plot
# (output$dietplan). Summarises the selection exactly as
# food_choice_for_compare does, then drops the total-calories column and
# re-expresses carbs and fat net of their sugar / saturated-fat
# components so the stacked segments are disjoint and sum to the total.
food_choice_for_diet <- reactive({
  selected_summary <- food_full %>%
    filter(Food == input$Food
           & Type == input$Type
           & Preparation == input$Preparation
           & Additions == input$Additions) %>%
    group_by(Food) %>%
    summarize(Avg_Total_Calories_per_100g = round(mean(Calories)),
              Calories_of_Carbs = round(mean(Total_Carbs_.g.) * 4),
              Calories_of_Sugar = round(mean(Total_Sugar_.g.) * 4),
              Calories_of_Fat = round(mean(Total_Fat_.g.) * 9),
              Calories_of_Saturated_Fat = round(mean(Saturated_Fat_.g.) * 9),
              Calories_of_Protein = round(mean(Protein_.g.) * 4)) %>%
    mutate(Food = "SELECTED FOOD")

  selected_summary %>%
    select(-Avg_Total_Calories_per_100g) %>%
    # Net out the sub-components: carbs excluding sugar, fat excluding
    # saturated fat (both are plotted as their own segments).
    mutate(Calories_of_Carbs = Calories_of_Carbs - Calories_of_Sugar,
           Calories_of_Fat = Calories_of_Fat - Calories_of_Saturated_Fat) %>%
    gather(Calories_of_Carbs,
           Calories_of_Sugar,
           Calories_of_Fat,
           Calories_of_Saturated_Fat,
           Calories_of_Protein,
           key = "Calorie_Type", value = "Calorie_Amount")
})
# Stacked bar: calorie breakdown of the selected food, each segment
# labelled with its calorie amount at the segment midpoint.
output$dietplan <- renderPlot({
  food_choice_for_diet() %>%
    ggplot(aes(x = Food, y = Calorie_Amount)) +
    geom_col(aes(fill = Calorie_Type)) +
    geom_text(aes(label = Calorie_Amount), size = 3,
              position = position_stack(vjust = 0.5))
})
#-------------------------------------------------------------------------------------------
# Static reference tables, rendered as-is.
output$Calorie_Needs <- renderTable({
  all_calorie_needs
})
output$Food_Table <- renderTable({
  food_full
})
# Bar chart of the chosen calorie metric across all foods.
# Fix: `fill = "red"` was previously passed to ggplot() outside aes(),
# where it lands in `...` and is silently ignored; a constant fill
# belongs on the geom. geom_col() is the idiomatic shorthand for
# geom_bar(stat = "identity").
output$food_by_category <- renderPlot({
  food_compare_main_graph %>%
    filter(Calorie_Type == input$food_comparison) %>%
    ggplot(aes(x = Food, y = Average_Calorie_Amount_Per_100g)) +
    geom_col(fill = "red")
})
# Horizontal bar chart of the chosen calorie metric across the types of
# one selected food.
# Fix: `fill = "red"` was previously passed to ggplot() outside aes(),
# where it lands in `...` and is silently ignored; a constant fill
# belongs on the geom. geom_col() is the idiomatic shorthand for
# geom_bar(stat = "identity").
output$food_by_type <- renderPlot({
  food_compare_type_graph %>%
    filter(Calorie_Type == input$food_type_comparison,
           Food == input$type_picked) %>%
    ggplot(aes(x = Type, y = Average_Calorie_Amount_Per_100g)) +
    geom_col(fill = "red") +
    coord_flip()
})
# Table of one food category filtered to the chosen calorie metric and type.
output$food_by_cat <- renderTable({
  food_compare_cat_table %>%
    filter(Calorie_Type == input$food_cat_comparison,
           Type == input$cat_picked)
})
}
|
54a785903e0618460cf02db5dcb030f0f1f6f9c5 | 3fc582b5ab0a1d2f778fd2582ce7c0bb670aa11d | /man/RootDevelopment.Rd | 545c19aa85f8d5423ab3a898d7803f40ababf6e3 | [] | no_license | bluegulcy/aquacrop | 43b56a8becb9244fe3028af853f3c19c99bdd5dd | 50a1d815d04259e8792aba763fc53e0f896bf1a0 | refs/heads/master | 2020-09-02T10:55:40.510230 | 2019-10-08T10:03:12 | 2019-10-08T10:03:12 | 219,206,208 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 694 | rd | RootDevelopment.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RootDevelopment.R
\name{RootDevelopment}
\alias{RootDevelopment}
\title{Calculate root zone expansion}
\usage{
RootDevelopment(Crop, Soil, GroundWater, InitCond, GDD, GrowingSeason)
}
\arguments{
\item{Crop}{Parameters for a given crop}
\item{Soil}{properties of soil}
\item{InitCond}{Crop setting initial structure}
\item{GDD}{Growing degree days}
\item{GrowingSeason}{crop developmental stage}
\item{GroundWater}{Ground water table}
}
\value{
\code{NewCond} for the given time step.
}
\description{
Calculate root zone expansion
}
\examples{
RootDevelopment(Crop, Soil, GroundWater, InitCond, GDD, GrowingSeason)
}
|
c24039a06bd838e4ff58e3f39745733c50eb9b76 | 0b4e456aed1637f0cb6e08ad49b7392867b9aec8 | /man/generateRandomPrimers.Rd | 0c5b6f5a3bb23d9f1ecee5af1b5b9677694331bd | [] | no_license | sherrillmix/ampCountR | 88854a1a9e24f77a27b42a0ee1e9dd7a02bcd3a8 | 2c7eb3a8d19f6221a73459ad6fb81f93e0e18875 | refs/heads/master | 2020-12-25T22:08:22.048533 | 2016-10-21T11:07:32 | 2016-10-21T11:07:32 | 37,539,674 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 646 | rd | generateRandomPrimers.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/ampCounter.R
\name{generateRandomPrimers}
\alias{generateRandomPrimers}
\title{Generate random primers for testing}
\usage{
generateRandomPrimers(genomeSize, frequency)
}
\arguments{
\item{genomeSize}{Max position for primers}
\item{frequency}{About how much space between primers. Function will generate approximately genomeSize/frequency primers}
}
\value{
sorted list of positions
}
\description{
Generate random positions for primers. Mostly for internal testing and examples.
}
\examples{
ampcountr:::generateRandomPrimers(10000,100)
}
\keyword{internal}
|
3e15d6a1594cc02658a4686b0bf2b27ef404f9ca | 30687b56de537b3f05886f2326eeb26e0f166459 | /BMI_analysis.R | d704f25582b100ef36ca62f787c799db35bf9c54 | [] | no_license | caiyuntingcfrc/TwinCity | 088754650125ad2648722aeef9fc9ef8a44b0fc1 | 1e1950d40fb35f8756861daba5eb2cdc58035796 | refs/heads/master | 2021-07-23T17:40:42.492939 | 2020-05-07T22:47:14 | 2020-05-07T22:47:14 | 163,916,083 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,739 | r | BMI_analysis.R |
# Prep and Options --------------------------------------------------------
# rm
rm(list = ls())
# set working directory
setwd("D:/R_wd/Twin Cities/")
# list of packages
list.packages <- c("tidyverse", "magrittr", "haven",
"gridExtra", "summarytools", "tabulizer",
"labelled", "DescTools", "userfriendlyscience",
"stargazer", "sjPlot")
# check if the packages are installed
new.packages <- list.packages[!(list.packages %in% installed.packages()[ , "Package"])]
# install new packages
if(length(new.packages)) install.packages(new.packages)
# load the packages
lapply(list.packages, require, character.only = TRUE)
# remove lists
rm(list.packages, new.packages)
# options
options(scipen = 999)
# options for summarytools
st_options(style = "simple",
round.digits = 4,
ctable.prop = "c")
# Read the file -----------------------------------------------------------
# SPSS survey data for the Taipei sample; read_sav() (haven) keeps the
# SPSS variable and value labels as attributes on each column.
df <- read_sav("Taipei_all.sav", encoding = "UTF-8")
# Desc --------------------------------------------------------------------
# Each sjt.xtab() call below cross-tabulates BMI category (ovrw) against
# one demographic variable and writes the formatted table to a Word file.
# BMI * sex
# Work on a copy so the relabelling does not alter the raw data frame.
d <- df
# Display label for Bd23 (sex) used in table headers; "性別" = sex.
attr(d$Bd23, "label") <- "性別"
sjt.xtab(d$ovrw, # row variable: BMI category
d$Bd23, # column variable: sex
# encoding and digits
encoding = "UTF-8", digits = 2,
# title (Chinese): "Twin Cities, Greater Taipei — BMI category x sex"
title = "雙城大台北地區 — BMI標準 X 性別",
# show cell percentages and legend; hide expected counts
show.cell.prc = TRUE, show.legend = TRUE, show.exp = FALSE,
string.total = "總和", emph.total = TRUE, file = "sex.doc")
# BMI & grade
sjt.xtab(d$ovrw, # row variable: BMI category
d$grade, # column variable: school grade
# encoding and digits
encoding = "UTF-8", digits = 2,
# title (Chinese): "Twin Cities, Greater Taipei — BMI category x grade"
title = "雙城大台北地區 — BMI標準 X 年級",
# show cell percentages and legend; hide expected counts
show.cell.prc = TRUE, show.legend = TRUE, show.exp = FALSE,
string.total = "總和", emph.total = TRUE, file = "grade.doc")
# BMI & disabilities
# Relabel Ch3 (health/disability status) and its value labels:
# "沒有"=none, "有"=yes, "不知道"=don't know, "不確定"=not sure, "拒答"=refused.
attr(d$Ch3, "label") <- "疾病狀況"
names(attr(d$Ch3, "labels")) <- c("沒有", "有", "不知道", "不確定", "拒答")
sjt.xtab(d$ovrw, # row variable: BMI category
d$Ch3, # column variable: disability/illness status
# encoding and digits
encoding = "UTF-8", digits = 2,
# title (Chinese): "Twin Cities, Greater Taipei — BMI category x condition"
title = "雙城大台北地區 — BMI標準 X 身心狀況",
# show cell percentages and legend; hide expected counts
show.cell.prc = TRUE, show.legend = TRUE, show.exp = FALSE,
string.total = "總和", emph.total = TRUE, file = "disability.doc")
# corr age, sex and BMI
# Pearson correlation table for age, sex and BMI category.
# NOTE(review): Bd23 and ovrw are categorical codes; Pearson correlation
# on them is only a rough association measure — confirm this is intended.
d <- df %>% select(c(realage, Bd23, ovrw))
attr(d$realage, "label") <- "年齡"
attr(d$Bd23, "label") <- "性別"
sjt.corr(d, corr.method = "pearson", encoding = "UTF-8",
p.numeric = TRUE,
fade.ns = TRUE, file = "corr.doc")
|
eef78d2475873b97599a2b8405e8ddf0f35ff6d4 | de1d0064b49664f711f217028046cc73007d5d9f | /R/mutes_users_list.R | 3c8e2e8d85fa5c7d167e74a974614f08e21f467a | [] | no_license | yutannihilation/twihttr | aeeab32c31372dc345e5a008127ca02e6bf3a4f9 | 5bf5b7a7792c7e82e5865310e668e6b96a89bcc4 | refs/heads/master | 2021-01-20T21:06:10.949602 | 2016-11-30T03:16:14 | 2016-11-30T03:16:14 | 65,869,824 | 0 | 0 | null | null | null | null | ISO-8859-4 | R | false | false | 1,247 | r | mutes_users_list.R | #' GET mutes/users/list
#'
#' @seealso \url{https://dev.twitter.com/rest/reference/get/mutes/users/list}
#' @param cursor
#' Causes the list of IDs to be broken into pages of no more than 5000 IDs at a time. The number of
#' IDs returned is not guaranteed to be 5000 as suspended users are filtered out after connections are
#' queried. If no cursor is provided, a value of -1 will be assumed, which is the first gpage.h The
#' response from the API will include a previous_cursor and next_cursor to allow paging back and forth.
#' See [node:10362, title=hUsing cursors to navigate collectionsh] for more information.
#' Example Values: 2
#' @param include_entities
#' The entities node will not be included when set to false.
#' Example Values: false
#' @param skip_status
#' When set to either true, t or 1 statuses will not be included in the returned user objects.
#' Example Values: true
#' @export
twtr_mutes_users_list <- function(cursor = NULL, include_entities = NULL, skip_status = NULL, ...) {
twtr_api("GET", "https://api.twitter.com/1.1/mutes/users/list.json", query = list(cursor = cursor, include_entities = include_entities,
skip_status = skip_status, ...))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.