blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
16f1f22d8e05e5d4650dd93d110229a9ca11cc7c
|
5e9288942f83508ff3ff9a3543cb6639c9e2ee3b
|
/R-package/data/aloha.R
|
f13839d17de0f65d3756c0780f5be2cf0479d40f
|
[
"BSD-2-Clause"
] |
permissive
|
omnetpp/omnetpp-resultfiles
|
e66af451d177942532d453171a5264e61f606a8f
|
b956990f747f5d099ff11c49b366aa9ced73cf1f
|
refs/heads/master
| 2021-11-10T20:19:29.670601
| 2020-02-03T12:54:15
| 2020-02-03T12:54:15
| 719,992
| 22
| 17
| null | 2020-01-30T17:19:29
| 2010-06-14T12:24:41
|
C++
|
UTF-8
|
R
| false
| false
| 204
|
r
|
aloha.R
|
# Load the example PureAloha scalar (.sca) and vector (.vec) result files
# shipped with the omnetpp R package into a single dataset.
# library() errors immediately if the package is missing; the original
# require() only warned and the script then failed later at loadDataset().
library(omnetpp)
extdata <- system.file('extdata', package = 'omnetpp')  # hoisted: was computed twice
aloha <- loadDataset(c(file.path(extdata, 'PureAloha*.sca'),
                       file.path(extdata, 'PureAloha*.vec')))
|
b12c627f829551dd10021a27e030f1764664ff6a
|
afb95602e7403f2c9d9834dd1d6c8373c74102d3
|
/Scripts/coproduction_preparation_rev.R
|
21373f0a0438ac598a10caa3aa815842f5c03f2a
|
[] |
no_license
|
matthiasschroeter/Coproduction_crops
|
0ed86c93330b9580e16a940127681057fb773cc5
|
f811e92358982bee72536e8661b200ba9b0444ff
|
refs/heads/master
| 2023-04-12T15:38:53.786693
| 2021-04-25T15:02:18
| 2021-04-25T15:02:18
| 293,589,974
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 82,424
|
r
|
coproduction_preparation_rev.R
|
## DATA PREPARATION: INTEGRATE FAO DATA, IFA DATA AND OTHER DATA SOURCES
############################################################################################################
##### First Table, 11 crop groups #####
############################################################################################################
##### load data for table 1 #####
# FAO production data, long ("normalized") format: one row per Area/Item/Year/Element
# (header = TRUE, not T: T is an ordinary variable and can be reassigned)
Crops <- read.csv("Production_Crops_E_All_Data_(Normalized).csv", sep = ",", header = TRUE)
# Split "Crops" by Element into an area-harvested table and a production table,
# rename their Value columns, keep only the needed columns, then merge the two.
# which() is kept so rows with NA in Element are dropped rather than kept as NA rows.
Area_harvested <- Crops[which(Crops$Element == 'Area harvested'), ]
colnames(Area_harvested)[colnames(Area_harvested) == "Value"] <- "Area_harvested_ha"
Area_harvested_k <- Area_harvested[, c("Area", "Item", "Year", "Area_harvested_ha")]
Production <- Crops[which(Crops$Element == 'Production'), ]
colnames(Production)[colnames(Production) == "Value"] <- "Production_t"
Production_k <- Production[, c("Area", "Item", "Year", "Production_t")]
# full outer join on Area/Item/Year so rows present in only one table are kept
Area_Production <- merge(Area_harvested_k, Production_k, by = c("Area", "Item", "Year"), all = TRUE)
################ save table for a shortcut ######################################################
# write.csv(Area_Production, file = "Area_Production.csv")
# NOTE(review): the write above is commented out, so this read relies on
# "Area_Production.csv" already existing from a previous run — confirm.
Area_Production <- read.csv("Area_Production.csv", sep = ",", header = TRUE)
#####################################################################################################
# select the years for which we have fertilizer data (2000, 2006, 2010 and 2014)
# (Year is numeric after read.csv; %in% coerces, so the string constants still match)
Area_Production_years <- Area_Production[Area_Production$Year %in% c("2000", "2006", "2010", "2014"), ]
################ save table for a shortcut ######################################################
write.csv(Area_Production_years, file = "Area_Production_years.csv")
Area_Production_years <- read.csv("Area_Production_years.csv", sep = ",", header = TRUE)
#####################################################################################################
#####################################################################################################
# Replace spaces in country names with underscores so the names can be used as keys later.
Area_Production_years$Area <- gsub("[ ]", "_", Area_Production_years$Area)
# select the countries for which fertilizer data is available (IFA)
# BUG FIX: after the gsub above every name uses underscores; the original list
# contained "United Kingdom" with a space, so the UK was silently dropped.
countries_IFA <- Area_Production_years[Area_Production_years$Area %in% c(
  "Albania", "Algeria", "Argentina", "Australia", "Azerbaijan", "Bangladesh", "Belarus",
  "Bolivia_(Plurinational_State_of)", "Brazil", "Cambodia", "Canada", "Chile", "China,_mainland",
  "Colombia", "Costa_Rica", "Dominican_Republic", "Ecuador", "Egypt", "El_Salvador", "Ethiopia",
  "Guatemala", "Guinea", "Honduras", "India", "Indonesia", "Israel", "Japan", "Jordan", "Kenya", "Kuwait",
  "Lao_People's_Democratic_Republic", "Lebanon", "Madagascar", "Malawi", "Malaysia", "Mauritania", "Mexico",
  "Morocco", "Myanmar", "New_Zealand", "Nicaragua", "Nigeria", "Norway", "Pakistan", "Paraguay",
  "Philippines", "Republic_of_Korea", "Republic_of_Moldova", "Saudi_Arabia", "South_Africa", "Sri_Lanka",
  "Switzerland", "Syrian_Arab_Republic", "Thailand", "Togo", "Turkey", "United_Republic_of_Tanzania",
  "United_States_of_America", "Uruguay", "Venezuela_(Bolivarian_Republic_of)", "Viet_Nam", "Zambia",
  "Zimbabwe", "Iran_(Islamic_Republic_of)", "Russian_Federation", "Ukraine", "Uzbekistan",
  "Austria", "Belgium", "Bulgaria", "Cyprus", "Czechia", "Denmark", "Estonia", "Finland", "France", "Germany",
  "Greece", "Hungary", "Ireland", "Italy", "Latvia", "Lithuania", "Luxembourg", "Malta", "Netherlands",
  "Poland", "Portugal", "Romania", "Slovakia", "Slovenia", "Spain", "Sweden", "United_Kingdom", "Croatia"), ]
# quick visual check of which countries survived the filter
unique(countries_IFA$Area)
################ save the table for a shortcut ######################################################
write.csv(countries_IFA, file = "countries_IFA.csv")
countries_IFA <- read.csv("countries_IFA.csv", sep = ",", header = TRUE)
#####################################################################################################
#####################################################################################################
#####################################################################################################
##### Crop selection #####
#####################################################################################################
# we need this 11 crop group table later, because fertilizer use is provided for crop groups
# unique Items (visual check of available FAO item names)
unique(countries_IFA$Item)
# Crop selection
# BUG FIX: several entries carried a stray leading space (e.g. " Poppy seed",
# " Oil, castor beans") and could therefore never match an FAO item name;
# the leading spaces are removed below — verify the FAO spelling of each.
crops_11CG_20 <- countries_IFA[countries_IFA$Item %in% c(
  "Wheat","Bran, wheat","Bulgur","Flour, wheat",
  "Rice, paddy","Oil, rice bran","Cake, rice bran",
  "Maize","Maize, green","Bran, maize","Cake, maize","Flour, maize","Germ, maize","Oil, maize","Popcorn",
  "Sweet corn frozen","Sweet corn prep or preserved",
  "Barley","Barley, pearled","Sorghum","Bran, sorghum","Oats", "Oats rolled","Rye","Triticale","Millet",
  "Bran, millet","Buckwheat", "Fonio", "Flour, fonio", "Quinoa",
  "Grain, mixed","Cereal preparations, nes","Flour, cereals","Cereals, nes",
  "Soybeans","Cake, soybeans","Oil, soybean",
  "Oil palm fruit","Oil, palm","Palm kernels","Oil, palm kernel","Cake, palm kernel",
  "Rapeseed","Cake, rapeseed","Oil, rapeseed","Mustard seed","Cake, mustard","Flour, mustard","Sunflower seed",
  "Cake, sunflower", "Oil, sunflower", "Groundnuts, with shell","Groundnuts, shelled","Peanut butter",
  "Cake, groundnuts","Oil, groundnut","Coconuts","Coconuts, desiccated","Castor oil seed","Oil, castor beans",
  "Hempseed","Cake, hempseed","Karite nuts (sheanuts)","Butter of karite nuts","Linseed","Cake, linseed",
  "Oil, linseed","Olives","Oil, olive, virgin","Olives preserved","Poppy seed","Oil, poppy","Sesame seed",
  "Cake, sesame seed","Oil, sesame",
  "Sugar beet","Beet pulp","Sugar cane","Sugar non-centrifugal","Cane tops","Molasses",
  "Cassava","Cassava leaves","Cassava dried","Starch, cassava","Potatoes",
  "Flour, potatoes","Potatoes, frozen","Potato offals","Sweet potatoes","Yams","Taro (cocoyam)",
  "Yautia (cocoyam)","Flour, roots and tubers nes","Roots and tubers, nes",
  "Artichokes","Asparagus","Beans, green","Cabbages and other brassicas","Carrots and turnips",
  "Cauliflowers and broccoli","Chillies and peppers, green","Cucumbers and gherkins","Eggplants (aubergines)",
  "Garlic","Leeks, other alliaceous vegetables","Lettuce and chicory","Mushrooms and truffles","Okra",
  "Onions, dry", "Onions, shallots, green","Peas, green","Pumpkins, squash and gourds","Spinach",
  "String beans","Tomatoes","Tomatoes, paste","Tomatoes, peeled","Turnips for fodder",
  "Mushrooms, canned","Juice, tomato",
  "Apples","Apricots","Apricots, dry","Avocados","Bananas","Blueberries","Carobs","Cashewapple",
  "Cherries","Cherries, sour","Cranberries","Currants", "Dates", "Figs","Figs dried", "Gooseberries",
  "Grapefruit (inc. pomelos)","Grapes","Raisins", "Kiwi fruit", "Lemons and limes","Mangoes, mangosteens, guavas",
  "Melons, other (inc.cantaloupes)","Oranges", "Papayas", "Peaches and nectarines","Pears","Persimmons",
  "Pineapples","Pineapples canned", "Plantains and others","Plums and sloes","Plums dried (prunes)",
  "Quinces","Raspberries", "Strawberries","Tangerines, mandarins, clementines, satsumas","Watermelons",
  "Fruit, citrus nes","Fruit, dried nes","Juice, grapefruit","Juice, grapefruit, concentrated",
  "Juice, orange, concentrated","Juice, orange, single strength","Juice, pineapple","Juice, pineapple, concentrated",
  "Juice, plum, concentrated","Juice, plum, single strength","Juice, citrus, concentrated",
  "Juice, citrus, single strength","Juice, fruit nes","Juice, grape","Juice, lemon, concentrated"), ]
################ save the table for a short cut ######################################################
write.csv(crops_11CG_20, file = "crops_11CG_20.csv")
crops_11CG_20 <- read.csv("crops_11CG_20.csv", sep = ",", header = TRUE)
############################################################################################################
############################################################################################################
############################################################################################################
##### Second Table, 15 crops #####
############################################################################################################
### load data, table 1 ([, -1] drops the row-number column write.csv added)
crops_11CG_20 <- read.csv("crops_11CG_20.csv", sep = ",", header = TRUE)[, -1]
############################################################################################################
############################ Crop selection #########################################
############################################################################################################
# keep only the 15 crops from the Zabel et al. (2014) paper:
# Barley, Cassava, Groundnut, Maize, Millet, Oil_Palm, Potato, Rapeseed/canola, Rice, Rye, Sorghum, Soybean,
# Sugarcane, Sunflower, Wheat (incl. summer and winter wheat)
crops_16x <- crops_11CG_20[crops_11CG_20$Item %in% c(
  "Wheat",
  "Rice, paddy",
  "Maize", "Maize, green",
  "Barley", "Sorghum", "Rye", "Millet",
  "Soybeans",
  "Oil palm fruit", "Palm kernels",
  "Rapeseed", "Sunflower seed", "Groundnuts, with shell",
  "Sugar cane",
  "Cassava", "Cassava leaves", "Potatoes"
), ]
# units: Area_harvested = ha, Production = tonnes
################ save table for a shortcut #######################################################
write.csv(crops_16x, file = "crops_16x.csv")
crops_16x <- read.csv("crops_16x.csv", sep = ",", header = TRUE)[, -1]
############################################################################################################
############################################################################################################
############################################################################################################
##### join kJ data ####
############################################################################################################
# some crops have a different name in the kJ list than in FAO data -> need to correct this
# convert kcal into kJ, then kJ/100g into kJ/t
# load data (semicolon-separated source file)
kcal_1 <- read.csv("kcal.csv", sep = ";", header = TRUE)
colnames(kcal_1) <- c("ITEM", "kcal_100g")
crops_16ax <- crops_16x
# check whether the crops have the same name in both lists
crops_16ax_U <- unique(crops_16ax$Item)
crops_16ax$Item <- toupper(crops_16ax$Item)
crops_kcal_1U <- unique(kcal_1$ITEM)
# identify crops that exist in the 15 crops list but not in the kcal list
# (i.e. have a different name in the kcal list) — printed for visual inspection
toupper(crops_16ax_U[which(is.element(toupper(unique(crops_16ax$Item)), unique(toupper(kcal_1$ITEM))) == FALSE)])
kcal_1$ITEM <- as.character(kcal_1$ITEM)
# correct the names in the kcal list to match the (upper-cased) FAO names
kcal_1$ITEM[which(kcal_1$ITEM == "GREEN CORN")] <- "MAIZE, GREEN"
kcal_1$ITEM[which(kcal_1$ITEM == "RICE PADDY")] <- "RICE, PADDY"
kcal_1$ITEM[which(kcal_1$ITEM == "GROUNDNUTS IN SHELL")] <- "GROUNDNUTS, WITH SHELL"
# convert kcal into kJ: 1 kcal = 4.1868 kJ
kcal_1$kj_100g <- kcal_1$kcal_100g * 4.1868
# per tonne: 1 t = 10,000 x 100 g
kcal_1$kj_t <- kcal_1$kj_100g * 1e+4
################ save table for a shortcut ########################################################
write.csv(kcal_1, file = "kcal_1.csv")
kcal_1 <- read.csv("kcal_1.csv", sep = ",", header = TRUE)
#############################################################################################################
# integrate the kcal data (unit: kJ per tonne) into the 15-crop table
crops_16ax$Item <- toupper(crops_16ax$Item)
# vectorized lookup via match() replaces the original element-wise loop;
# an item missing from the kcal list now yields NA instead of aborting the
# script with "replacement has length zero"
crops_16ax$kj_t_crop <- kcal_1$kj_t[match(crops_16ax$Item, kcal_1$ITEM)]
################ save table for a shortcut ########################################################
write.csv(crops_16ax, file = "crops_16ax.csv")
crops_16ax <- read.csv("crops_16ax.csv", sep = ",", header = TRUE)
#############################################################################################################
#############################################################################################################
# multiply the kJ value of each crop with the respective production of each country and year
crops_16bx <- crops_16ax
# production energy: tonnes * kJ/tonne -> kJ
crops_16bx$production_kj <- crops_16bx$Production_t * crops_16bx$kj_t_crop
################ save table for a shortcut ########################################################
write.csv(crops_16bx, file = "crops_16bx.csv")
crops_16bx <- read.csv("crops_16bx.csv", sep = ",", header = TRUE)[, -1]
#############################################################################################################
crops_16cx <- crops_16bx
# aggregate kJ values in order to get only one value per year, country and column
# new rownames (seq_len is safe for zero-row tables, unlike 1:nrow)
row.names(crops_16cx) <- seq_len(nrow(crops_16cx))
# country-year key: spaces inside country names become underscores, so the key
# is always "Country Year" with exactly one space separating the two parts
crops_16cx$code_Area_Year <- paste(gsub("[ ]", "_", crops_16cx$Area), crops_16cx$Year)
# aggregate kJ values for each column separately
kJ_total_production <- aggregate(production_kj ~ code_Area_Year, data = crops_16cx, sum)
Area_H <- aggregate(Area_harvested_ha ~ code_Area_Year, data = crops_16cx, sum)
# merge them again into one table
total_kJ <- merge(kJ_total_production, Area_H, by = "code_Area_Year", all = TRUE)
# split the "Country Year" key back into its two parts with sub() — this
# replaces the fragile unlist(strsplit())/odd-even indexing and the magic
# V4/V5 column renames of the original
total_kJ$Country <- sub(" [^ ]*$", "", total_kJ$code_Area_Year)  # drop trailing " Year"
total_kJ$Year <- sub("^.* ", "", total_kJ$code_Area_Year)        # keep text after the space
# new column order
crops_16cx_agg <- total_kJ[, c("Country", "Year", "Area_harvested_ha", "production_kj", "code_Area_Year")]
################ save table for a shortcut ########################################################
write.csv(crops_16cx_agg, file = "crops_16cx_agg.csv")
crops_16cx_agg <- read.csv("crops_16cx_agg.csv", sep = ",", header = TRUE)[, -1]
#############################################################################################################
#############################################################################################################
#############################################################################################################
### integrate crop-specific fertilizer data
### Refer to scripts Fertilizer_2000.R, Fertilizer_2006.R, Fertilizer_2010.R, Fertilizer_2014.R
#############################################################################################################
crops_16ex <- crops_16cx_agg
# helper: load one fertilizer table, keep the needed columns, rename sum_t
# to N_total_t, and tag every row with its year
load_fertilizer <- function(file, year) {
  d <- read.csv(file, sep = ",", header = TRUE)[, c("Area", "N_total_kj", "sum_t")]
  d$Year <- year
  colnames(d)[colnames(d) == "sum_t"] <- "N_total_t"
  d
}
# bind the four yearly tables into one (replaces four copy-pasted stanzas)
Fertilizer_all <- rbind(
  load_fertilizer("Fertilizer_2000b.csv", "2000"),
  load_fertilizer("Fertilizer_2006d.csv", "2006"),
  load_fertilizer("Fertilizer_2010c.csv", "2010"),
  load_fertilizer("Fertilizer_2014c.csv", "2014")
)
# create a country-year key on both sides
Fertilizer_all$code_Area_Year <- as.character(paste(gsub("[ ]", "_", Fertilizer_all$Area), Fertilizer_all$Year))
crops_16ex$code_Area_Year <- as.character(crops_16ex$code_Area_Year)
# vectorized lookup via match() replaces the two element-wise loops:
# rows without a fertilizer match get NA (same as the original else-branch)
idx <- match(crops_16ex$code_Area_Year, Fertilizer_all$code_Area_Year)
# total energy of fertilizer production used per country and year (kJ)
crops_16ex$N_total_kj <- Fertilizer_all$N_total_kj[idx]
# total fertilizer used per country and year (t)
crops_16ex$N_total_t <- Fertilizer_all$N_total_t[idx]
################ save the table for a shortcut ########################################################
write.csv(crops_16ex, file = "crops_16ex.csv")
crops_16ex <- read.csv("crops_16ex.csv", sep = ",", header = TRUE)[, -1]
#############################################################################################################
# new column order
crops_16fx <- crops_16ex[, c("Country", "Year", "Area_harvested_ha", "N_total_kj", "N_total_t", "production_kj", "code_Area_Year")]
#############################################################################################################
### crop suitability data #####
#############################################################################################################
# area-weighted suitability values for whole country, based on cropland extent of Monfreda et al. (2008)
crop_suit_zabel <- read.csv("cropsuit_overall.csv", sep = ";", header = TRUE)
crop_suit_zabel$NAME_LONG <- gsub("[ ]", "_", crop_suit_zabel$NAME_LONG)
# print countries present in the crop table but missing from the suitability table
code_k <- unique(crops_16fx$Country)
code_k[which(is.element(code_k, unique(crop_suit_zabel$NAME_LONG)) == FALSE)]
# correct names of countries (align with the FAO naming used in crops_16fx)
crop_suit_zabel$NAME_LONG[which(crop_suit_zabel$NAME_LONG == "Bolivia")] <- "Bolivia_(Plurinational_State_of)"
crop_suit_zabel$NAME_LONG[which(crop_suit_zabel$NAME_LONG == "China")] <- "China,_mainland"
crop_suit_zabel$NAME_LONG[which(crop_suit_zabel$NAME_LONG == "Czech_Republic")] <- "Czechia"
crop_suit_zabel$NAME_LONG[which(crop_suit_zabel$NAME_LONG == "Iran")] <- "Iran_(Islamic_Republic_of)"
crop_suit_zabel$NAME_LONG[which(crop_suit_zabel$NAME_LONG == "Lao_PDR")] <- "Lao_People's_Democratic_Republic"
crop_suit_zabel$NAME_LONG[which(crop_suit_zabel$NAME_LONG == "Moldova")] <- "Republic_of_Moldova"
crop_suit_zabel$NAME_LONG[which(crop_suit_zabel$NAME_LONG == "Syria")] <- "Syrian_Arab_Republic"
crop_suit_zabel$NAME_LONG[which(crop_suit_zabel$NAME_LONG == "Tanzania")] <- "United_Republic_of_Tanzania"
crop_suit_zabel$NAME_LONG[which(crop_suit_zabel$NAME_LONG == "United_States")] <- "United_States_of_America"
crop_suit_zabel$NAME_LONG[which(crop_suit_zabel$NAME_LONG == "Venezuela")] <- "Venezuela_(Bolivarian_Republic_of)"
crop_suit_zabel$NAME_LONG[which(crop_suit_zabel$NAME_LONG == "Vietnam")] <- "Viet_Nam"
# key column (kept for compatibility with downstream inspection; not used below)
crop_suit_zabel$code_Area_Year <- paste(gsub("[ ]", "_", crop_suit_zabel$NAME_LONG), "2010")
# suitability of 2010 applied to every analysis year.
# NOTE(review): the original had a first loop filling 2010-only values that was
# immediately overwritten by a second loop over all years — dead code, removed.
# Both loops are replaced by one vectorized match(): countries absent from the
# suitability table get NA, as before.
years <- c("2000", "2006", "2010", "2014")
crops_16fx$suitability <- crop_suit_zabel$SUM[match(crops_16fx$Country, crop_suit_zabel$NAME_LONG)]
crops_16fx$suitability[!(crops_16fx$Year %in% years)] <- NA
## crop suitability data only on cropland as indicated by ESA CCI land cover data,
#not as an average for the country as a whole
#crop_suit_zabel_00<-read.csv("cropsuit_Zabel_00.csv",sep=";",header=T)
#crop_suit_zabel_00$NAME_LONG<-paste(gsub("[ ]","_",crop_suit_zabel_00$NAME_LONG))
#crop_suit_zabel_06<-read.csv("cropsuit_Zabel_06.csv",sep=";",header=T)
#crop_suit_zabel_06$NAME_LONG<-paste(gsub("[ ]","_",crop_suit_zabel_06$NAME_LONG))
#crop_suit_zabel_10<-read.csv("cropsuit_Zabel_10.csv",sep=";",header=T)
#crop_suit_zabel_10$NAME_LONG<-paste(gsub("[ ]","_",crop_suit_zabel_10$NAME_LONG))
#crop_suit_zabel_14<-read.csv("cropsuit_Zabel_14.csv",sep=";",header=T)
#crop_suit_zabel_14$NAME_LONG<-paste(gsub("[ ]","_",crop_suit_zabel_14$NAME_LONG))
#code_k<-unique(crops_16fx$Country)
#code_k[which(is.element(unique(crops_16fx$Country),unique(crop_suit_zabel_14$NAME_LONG))==F)]
#crop_suit_zabel_00$NAME_LONG[which(crop_suit_zabel_00$NAME_LONG=="Czech_Republic")]<-"Czechia"
#crop_suit_zabel_00$NAME_LONG[which(crop_suit_zabel_00$NAME_LONG=="Bolivia")]<-"Bolivia_(Plurinational_State_of)"
#crop_suit_zabel_00$NAME_LONG[which(crop_suit_zabel_00$NAME_LONG=="China")]<-"China,_mainland"
#crop_suit_zabel_00$NAME_LONG[which(crop_suit_zabel_00$NAME_LONG=="Czech_Republic")]<-"Czechia"
#crop_suit_zabel_00$NAME_LONG[which(crop_suit_zabel_00$NAME_LONG=="Iran")]<-"Iran_(Islamic_Republic_of)"
#crop_suit_zabel_00$NAME_LONG[which(crop_suit_zabel_00$NAME_LONG=="Lao_PDR")]<-"Lao_People's_Democratic_Republic"
#crop_suit_zabel_00$NAME_LONG[which(crop_suit_zabel_00$NAME_LONG=="Moldova")]<-"Republic_of_Moldova"
#crop_suit_zabel_00$NAME_LONG[which(crop_suit_zabel_00$NAME_LONG=="Syria")]<-"Syrian_Arab_Republic"
#crop_suit_zabel_00$NAME_LONG[which(crop_suit_zabel_00$NAME_LONG=="Tanzania")]<-"United_Republic_of_Tanzania"
#crop_suit_zabel_00$NAME_LONG[which(crop_suit_zabel_00$NAME_LONG=="United_States")]<-"United_States_of_America"
#crop_suit_zabel_00$NAME_LONG[which(crop_suit_zabel_00$NAME_LONG=="Venezuela")]<-"Venezuela_(Bolivarian_Republic_of)"
#crop_suit_zabel_00$NAME_LONG[which(crop_suit_zabel_00$NAME_LONG=="Vietnam")]<-"Viet_Nam"
# BUG FIX: the _06/_10/_14 rename lines below were left active although the
# read.csv calls that create those objects (above) are commented out; running
# them aborted the script with "object 'crop_suit_zabel_06' not found". The
# whole alternative path is now commented out, consistent with the _00 lines.
# Uncomment the reads above together with these lines to re-enable it.
#crop_suit_zabel_06$NAME_LONG[which(crop_suit_zabel_06$NAME_LONG=="Czech_Republic")]<-"Czechia"
#crop_suit_zabel_06$NAME_LONG[which(crop_suit_zabel_06$NAME_LONG=="Bolivia")]<-"Bolivia_(Plurinational_State_of)"
#crop_suit_zabel_06$NAME_LONG[which(crop_suit_zabel_06$NAME_LONG=="China")]<-"China,_mainland"
#crop_suit_zabel_06$NAME_LONG[which(crop_suit_zabel_06$NAME_LONG=="Czech_Republic")]<-"Czechia"
#crop_suit_zabel_06$NAME_LONG[which(crop_suit_zabel_06$NAME_LONG=="Iran")]<-"Iran_(Islamic_Republic_of)"
#crop_suit_zabel_06$NAME_LONG[which(crop_suit_zabel_06$NAME_LONG=="Lao_PDR")]<-"Lao_People's_Democratic_Republic"
#crop_suit_zabel_06$NAME_LONG[which(crop_suit_zabel_06$NAME_LONG=="Moldova")]<-"Republic_of_Moldova"
#crop_suit_zabel_06$NAME_LONG[which(crop_suit_zabel_06$NAME_LONG=="Syria")]<-"Syrian_Arab_Republic"
#crop_suit_zabel_06$NAME_LONG[which(crop_suit_zabel_06$NAME_LONG=="Tanzania")]<-"United_Republic_of_Tanzania"
#crop_suit_zabel_06$NAME_LONG[which(crop_suit_zabel_06$NAME_LONG=="United_States")]<-"United_States_of_America"
#crop_suit_zabel_06$NAME_LONG[which(crop_suit_zabel_06$NAME_LONG=="Venezuela")]<-"Venezuela_(Bolivarian_Republic_of)"
#crop_suit_zabel_06$NAME_LONG[which(crop_suit_zabel_06$NAME_LONG=="Vietnam")]<-"Viet_Nam"
#crop_suit_zabel_10$NAME_LONG[which(crop_suit_zabel_10$NAME_LONG=="Czech_Republic")]<-"Czechia"
#crop_suit_zabel_10$NAME_LONG[which(crop_suit_zabel_10$NAME_LONG=="Bolivia")]<-"Bolivia_(Plurinational_State_of)"
#crop_suit_zabel_10$NAME_LONG[which(crop_suit_zabel_10$NAME_LONG=="China")]<-"China,_mainland"
#crop_suit_zabel_10$NAME_LONG[which(crop_suit_zabel_10$NAME_LONG=="Czech_Republic")]<-"Czechia"
#crop_suit_zabel_10$NAME_LONG[which(crop_suit_zabel_10$NAME_LONG=="Iran")]<-"Iran_(Islamic_Republic_of)"
#crop_suit_zabel_10$NAME_LONG[which(crop_suit_zabel_10$NAME_LONG=="Lao_PDR")]<-"Lao_People's_Democratic_Republic"
#crop_suit_zabel_10$NAME_LONG[which(crop_suit_zabel_10$NAME_LONG=="Moldova")]<-"Republic_of_Moldova"
#crop_suit_zabel_10$NAME_LONG[which(crop_suit_zabel_10$NAME_LONG=="Syria")]<-"Syrian_Arab_Republic"
#crop_suit_zabel_10$NAME_LONG[which(crop_suit_zabel_10$NAME_LONG=="Tanzania")]<-"United_Republic_of_Tanzania"
#crop_suit_zabel_10$NAME_LONG[which(crop_suit_zabel_10$NAME_LONG=="United_States")]<-"United_States_of_America"
#crop_suit_zabel_10$NAME_LONG[which(crop_suit_zabel_10$NAME_LONG=="Venezuela")]<-"Venezuela_(Bolivarian_Republic_of)"
#crop_suit_zabel_10$NAME_LONG[which(crop_suit_zabel_10$NAME_LONG=="Vietnam")]<-"Viet_Nam"
#crop_suit_zabel_14$NAME_LONG[which(crop_suit_zabel_14$NAME_LONG=="Czech_Republic")]<-"Czechia"
#crop_suit_zabel_14$NAME_LONG[which(crop_suit_zabel_14$NAME_LONG=="Bolivia")]<-"Bolivia_(Plurinational_State_of)"
#crop_suit_zabel_14$NAME_LONG[which(crop_suit_zabel_14$NAME_LONG=="China")]<-"China,_mainland"
#crop_suit_zabel_14$NAME_LONG[which(crop_suit_zabel_14$NAME_LONG=="Czech_Republic")]<-"Czechia"
#crop_suit_zabel_14$NAME_LONG[which(crop_suit_zabel_14$NAME_LONG=="Iran")]<-"Iran_(Islamic_Republic_of)"
#crop_suit_zabel_14$NAME_LONG[which(crop_suit_zabel_14$NAME_LONG=="Lao_PDR")]<-"Lao_People's_Democratic_Republic"
#crop_suit_zabel_14$NAME_LONG[which(crop_suit_zabel_14$NAME_LONG=="Moldova")]<-"Republic_of_Moldova"
#crop_suit_zabel_14$NAME_LONG[which(crop_suit_zabel_14$NAME_LONG=="Syria")]<-"Syrian_Arab_Republic"
#crop_suit_zabel_14$NAME_LONG[which(crop_suit_zabel_14$NAME_LONG=="Tanzania")]<-"United_Republic_of_Tanzania"
#crop_suit_zabel_14$NAME_LONG[which(crop_suit_zabel_14$NAME_LONG=="United_States")]<-"United_States_of_America"
#crop_suit_zabel_14$NAME_LONG[which(crop_suit_zabel_14$NAME_LONG=="Venezuela")]<-"Venezuela_(Bolivarian_Republic_of)"
#crop_suit_zabel_14$NAME_LONG[which(crop_suit_zabel_14$NAME_LONG=="Vietnam")]<-"Viet_Nam"
#crops_16fx$cropsuit<-NA
#for(i in 1:length(crops_16fx[,1])){
# if((is.element(crops_16fx$Country[i],code_k))&(crops_16fx$Year[i]=="2000")){
# crops_16fx$cropsuit[i]<-crop_suit_zabel_00$MEAN[which(crop_suit_zabel_00$NAME_LONG==crops_16fx$Country[i])]}
# if((is.element(crops_16fx$Country[i],code_k))&(crops_16fx$Year[i]=="2006")){
# crops_16fx$cropsuit[i]<-crop_suit_zabel_06$MEAN[which(crop_suit_zabel_06$NAME_LONG==crops_16fx$Country[i])]}
# if((is.element(crops_16fx$Country[i],code_k))&(crops_16fx$Year[i]=="2010")){
# crops_16fx$cropsuit[i]<-crop_suit_zabel_10$MEAN[which(crop_suit_zabel_10$NAME_LONG==crops_16fx$Country[i])]}
# if((is.element(crops_16fx$Country[i],code_k))&(crops_16fx$Year[i]=="2014")){
# crops_16fx$cropsuit[i]<-crop_suit_zabel_14$MEAN[which(crop_suit_zabel_14$NAME_LONG==crops_16fx$Country[i])]}
#}
#############################################################################################################
### market influence data #####
#############################################################################################################
# market influence data on cropland extent based on Monfreda et al. (2008)
market <- read.csv("market_15crops.csv", sep = ";", header = TRUE)
colnames(market) <- c("Country", "Mean")
market$Country <- gsub("[ ]", "_", market$Country)
# print countries missing from the market table (visual check)
code_k[which(is.element(unique(crops_16fx$Country), unique(market$Country)) == FALSE)]
# correct names of countries (align with the FAO naming used in crops_16fx)
market$Country[which(market$Country == "Bolivia")] <- "Bolivia_(Plurinational_State_of)"
market$Country[which(market$Country == "China")] <- "China,_mainland"
market$Country[which(market$Country == "Czech_Republic")] <- "Czechia"
market$Country[which(market$Country == "Iran")] <- "Iran_(Islamic_Republic_of)"
market$Country[which(market$Country == "Lao_PDR")] <- "Lao_People's_Democratic_Republic"
market$Country[which(market$Country == "Moldova")] <- "Republic_of_Moldova"
market$Country[which(market$Country == "Syria")] <- "Syrian_Arab_Republic"
market$Country[which(market$Country == "Tanzania")] <- "United_Republic_of_Tanzania"
market$Country[which(market$Country == "United_States")] <- "United_States_of_America"
market$Country[which(market$Country == "Venezuela")] <- "Venezuela_(Bolivarian_Republic_of)"
market$Country[which(market$Country == "Vietnam")] <- "Viet_Nam"
# market influence value for each year.
# BUG FIX: the original loop tested the undefined variable `code_market`
# (the defined name was `code_k`), which aborted the script on the first
# iteration. Replaced by a vectorized match(): countries absent from the
# market table get NA instead of an error.
years <- c("2000", "2006", "2010", "2014")
code_k <- unique(crops_16fx$Country)
crops_16fx$market <- market$Mean[match(crops_16fx$Country, market$Country)]
crops_16fx$market[!(crops_16fx$Year %in% years)] <- NA
#############################################################################################################
### net capital stock #####
#############################################################################################################
net_cap <- read.csv("FAOSTAT_data_12-12-2018.csv", sep = ",", header = TRUE)
net_cap$Area <- gsub("[ ]", "_", net_cap$Area)
# print countries missing from the capital-stock table (visual check)
code_k <- unique(crops_16fx$Country)
code_k[which(is.element(code_k, unique(net_cap$Area)) == FALSE)]
# the Croatia row arrives with an empty Area field in this export — restore it
net_cap$Area[which(net_cap$Area == "")] <- "Croatia"
# country-year key
net_cap$code_Area_Year <- paste(gsub("[ ]", "_", net_cap$Area), net_cap$Year)
# vectorized lookup via match() replaces the element-wise loop;
# keys without a capital-stock row get NA (same as the original else-branch)
crops_16fx$net_capital_stocks <- net_cap$Value[match(crops_16fx$code_Area_Year, net_cap$code_Area_Year)]
# correct values for Belarus for net_capital_stocks
crops_16fx[which(crops_16fx$Country == "Belarus" & crops_16fx$Year == "2000"), "net_capital_stocks"] <- 16379.36
crops_16fx[which(crops_16fx$Country == "Belarus" & crops_16fx$Year == "2006"), "net_capital_stocks"] <- 20439.1
crops_16fx[which(crops_16fx$Country == "Belarus" & crops_16fx$Year == "2010"), "net_capital_stocks"] <- 26802.53
crops_16fx[which(crops_16fx$Country == "Belarus" & crops_16fx$Year == "2014"), "net_capital_stocks"] <- 26658.14
# correct net capital stock prices to 2014 values with CPI data
# https://data.bls.gov/cgi-bin/cpicalc.pl?cost1=1.00&year1=200001&year2=201401
# named multiplier lookup replaces the per-row if-chain; a year outside the
# table yields NA, matching the original's untouched-NA behavior
cpi_to_2014 <- c("2000" = 1.39, "2006" = 1.18, "2010" = 1.08, "2014" = 1.00)
crops_16fx$net_capital_stocks14 <-
  unname(crops_16fx$net_capital_stocks * cpi_to_2014[as.character(crops_16fx$Year)])
#############################################################################################################
### Manure use data #####
#############################################################################################################
# Manure N applied to soils (FAO), merged onto crops_16fx by "Country Year" key.
Manure <- read.csv("manure_FAO.csv", sep = ";", header = TRUE)
Manure$Area <- paste(gsub("[ ]", "_", Manure$Area))
# diagnostic: list crop countries missing from the manure table
code_k <- unique(crops_16fx$Country)
code_k[which(is.element(unique(crops_16fx$Country), unique(Manure$Area)) == FALSE)]
# keep only the nitrogen content actually applied to soils
Manure <- Manure[which(Manure$Element == "Manure applied to soils (N content)"), ]
Manure$code_Area_Year <- as.character(paste(gsub("[ ]", "_", Manure$Area), Manure$Year))
crops_16fx$code_Area_Year <- as.character(crops_16fx$code_Area_Year)
# Vectorized merge; NA where the key is absent (same as the element-wise loop).
crops_16fx$Manure_kg <- Manure$Value[match(crops_16fx$code_Area_Year, Manure$code_Area_Year)]
################ save table for a shortcut #######################################################
write.csv(crops_16fx, file = "crops_16fx.csv")
crops_16fx <- read.csv("crops_16fx.csv", sep = ",", header = TRUE)[, -1]
##############################################################################################################
## Clean the data: derive per-hectare intensities and total factor productivity.
crops_16Lx_clean <- crops_16fx
# nitrogen input (kJ) per hectare harvested
crops_16Lx_clean$N_div_AreaH_kj_ha <- crops_16Lx_clean$N_total_kj / crops_16Lx_clean$Area_harvested_ha
# crop output (kJ) per hectare harvested
crops_16Lx_clean$production_div_AreaH_kj_ha <- crops_16Lx_clean$production_kj / crops_16Lx_clean$Area_harvested_ha
# manure N (kg) per hectare harvested
crops_16Lx_clean$manure_area <- crops_16Lx_clean$Manure_kg / crops_16Lx_clean$Area_harvested_ha
# net capital stock per hectare harvested (nominal)
crops_16Lx_clean$netcap_area <- crops_16Lx_clean$net_capital_stocks / crops_16Lx_clean$Area_harvested_ha
# net capital stock per hectare harvested (2014-corrected)
crops_16Lx_clean$netcap14_area <- crops_16Lx_clean$net_capital_stocks14 / crops_16Lx_clean$Area_harvested_ha
# total factor productivity: energy output per unit energy input
crops_16Lx_clean$TFP <- crops_16Lx_clean$production_kj / crops_16Lx_clean$N_total_kj
# nitrogen (t) per hectare harvested
crops_16Lx_clean$Nt_area <- crops_16Lx_clean$N_total_t / crops_16Lx_clean$Area_harvested_ha
################ save table for a short cut #######################################################
write.csv(crops_16Lx_clean, file = "crops_16Lx_clean.csv")
crops_16Lx_clean <- read.csv("crops_16Lx_clean.csv", sep = ",", header = TRUE)[, -1]
#############################################################################################################
# Keep the analysis variables only, under shorter names.
keep_cols <- c("Country", "Year", "N_div_AreaH_kj_ha", "Nt_area",
               "production_div_AreaH_kj_ha", "suitability", "manure_area",
               "netcap_area", "netcap14_area", "market")
raw_data <- crops_16Lx_clean[, keep_cols]
names(raw_data) <- c("country", "year", "N_area", "Nt_area", "yield",
                     "cropsuit", "manure_area", "netcap_area",
                     "netcap14_area", "market")
################ save table for a short cut #######################################################
write.csv(raw_data, file = "raw_data.csv")
raw_data <- read.csv("raw_data.csv", sep = ",", header = TRUE)[, -1]
#############################################################################################################
### Aggregate EU27 data and merge with subset of data
crops_16L_clean <- read.csv("crops_16Lx_clean.csv", sep = ",", header = TRUE)[, -1]
## Create EU27 data and keep it separate
# no net capital stock data for Croatia, hence EU27 without Croatia
EU27 <- c("Austria","Belgium","Bulgaria","Cyprus","Denmark","Estonia","Finland","France","Germany","Greece",
"Ireland","Italy","Latvia","Lithuania","Luxembourg","Malta","Netherlands","Poland","Romania",
"Portugal","Slovakia","Slovenia","Spain","Sweden","United_Kingdom","Hungary","Czechia")
# Tag EU27 member rows ("EU27" for members, NA otherwise); vectorized
# replacement of the original row loop.
crops_16L_clean$EU27 <- NA
crops_16L_clean$EU27[crops_16L_clean$Country %in% EU27] <- "EU27"
# Split the cleaned panel by survey year.
pick_year <- function(df, yr) df[which(df$Year == yr), ]
crops_16L_00 <- pick_year(crops_16L_clean, "2000")
crops_16L_06 <- pick_year(crops_16L_clean, "2006")
crops_16L_10 <- pick_year(crops_16L_clean, "2010")
crops_16L_14 <- pick_year(crops_16L_clean, "2014")
# EU27 member rows only, per year.
crops_16L_00_EU27 <- crops_16L_00[which(crops_16L_00$EU27 == "EU27"), ]
crops_16L_06_EU27 <- crops_16L_06[which(crops_16L_06$EU27 == "EU27"), ]
crops_16L_10_EU27 <- crops_16L_10[which(crops_16L_10$EU27 == "EU27"), ]
crops_16L_14_EU27 <- crops_16L_14[which(crops_16L_14$EU27 == "EU27"), ]
# Total EU27 crop production (kJ) per year.
prod_sum_00_EU <- sum(crops_16L_00_EU27$production_kj)
prod_sum_06_EU <- sum(crops_16L_06_EU27$production_kj)
prod_sum_10_EU <- sum(crops_16L_10_EU27$production_kj)
prod_sum_14_EU <- sum(crops_16L_14_EU27$production_kj)
# one dataframe for EU27 per year
# build_eu27_row(): collapse the EU27 member rows of one survey year into a
# single "EU27" observation. Per-hectare variables are area-weighted means,
# i.e. member sums divided by total harvested area. cropsuit and market are
# fixed EU27-level constants (identical across years in the original script).
# Replaces four copy-pasted, column-by-column frame constructions.
build_eu27_row <- function(df_eu, yr) {
  area <- sum(df_eu$Area_harvested_ha)
  data.frame(
    country = "EU27",
    year = yr,
    N_area = sum(df_eu$N_total_kj) / area,
    Nt_area = sum(df_eu$N_total_t) / area,
    yield = sum(df_eu$production_kj) / area,
    cropsuit = 47.0027606,      # EU27 aggregate crop suitability
    manure_area = sum(df_eu$Manure_kg) / area,
    netcap_area = sum(df_eu$net_capital_stocks) / area,
    netcap14_area = sum(df_eu$net_capital_stocks14) / area,
    market = 14327.100380       # EU27 aggregate market influence
  )
}
data_EU27_00 <- build_eu27_row(crops_16L_00_EU27, 2000)
data_EU27_06 <- build_eu27_row(crops_16L_06_EU27, 2006)
data_EU27_10 <- build_eu27_row(crops_16L_10_EU27, 2010)
data_EU27_14 <- build_eu27_row(crops_16L_14_EU27, 2014)
write.csv(data_EU27_00, file = "data_EU27_00.csv")
write.csv(data_EU27_06, file = "data_EU27_06.csv")
write.csv(data_EU27_10, file = "data_EU27_10.csv")
write.csv(data_EU27_14, file = "data_EU27_14.csv")
# Reload the country-level data, drop incomplete rows and zero-valued
# market / crop-suitability records (rows are NA-free after na.exclude,
# so the combined filter matches the two separate ones).
data <- read.csv("raw_data.csv", sep = ",", header = TRUE)[, -1]
data <- na.exclude(data)
data <- data[which(data$market != 0 & data$cropsuit != 0), ]
by_year <- function(yr) data[which(data$year == yr), ]
data_00 <- by_year("2000")
data_06 <- by_year("2006")
data_10 <- by_year("2010")
data_14 <- by_year("2014")
# One frame per year: country rows plus the aggregated EU27 row.
data_EU_00 <- rbind(data_00, data_EU27_00)
data_EU_06 <- rbind(data_06, data_EU27_06)
data_EU_10 <- rbind(data_10, data_EU27_10)
data_EU_14 <- rbind(data_14, data_EU27_14)
#extract IFA countries, Belarus for 2000 removed because of incoherent value
# IFA_14 / IFA_10 / IFA_06 / IFA_00: the country selection (plus the aggregated
# "EU27" unit) retained for each survey year. Membership differs by year:
# e.g. Belarus enters from 2010, New_Zealand is absent in 2006/2010, and the
# 2000 list is much broader. Names must match the FAO spelling used in the
# `country` column (underscores instead of spaces).
IFA_14 <- c("EU27","Argentina","Australia","Bangladesh", "Belarus","Brazil","Canada",
"China,_mainland","Chile","Egypt","India","Indonesia","Iran_(Islamic_Republic_of)","Japan",
"Malaysia","Mexico","Morocco","New_Zealand","Pakistan","Philippines","Russian_Federation",
"South_Africa","Thailand","Turkey","Ukraine","United_States_of_America","Uzbekistan","Viet_Nam")
# 2010 selection: as 2014 but without New_Zealand.
IFA_10 <- c("EU27","Argentina","Australia","Bangladesh", "Belarus","Brazil","Canada",
"China,_mainland","Chile","Egypt","India","Indonesia","Iran_(Islamic_Republic_of)","Japan",
"Malaysia","Mexico","Morocco","Pakistan","Philippines","Russian_Federation",
"South_Africa","Thailand","Turkey","Ukraine","United_States_of_America","Uzbekistan","Viet_Nam")
# 2006 selection: as 2010 but without Belarus.
IFA_06 <- c("EU27","Argentina","Australia","Bangladesh","Brazil","Canada",
"China,_mainland","Chile","Egypt","India","Indonesia","Iran_(Islamic_Republic_of)","Japan",
"Malaysia","Mexico","Morocco","Pakistan","Philippines","Russian_Federation",
"South_Africa","Thailand","Turkey","Ukraine","United_States_of_America","Uzbekistan","Viet_Nam")
# 2000 selection: considerably larger set of countries.
IFA_00 <- c("EU27","Albania", "Australia", "Azerbaijan", "Bangladesh", "Canada", "China,_Taiwan_Province_of",
"Croatia", "El_Salvador", "Ethiopia", "Fiji", "Israel", "Japan", "Republic_of_Korea", "Kuwait", "Republic_of_Moldova",
"Morocco","New_Zealand", "Nigeria", "Norway", "Pakistan", "Saudi_Arabia", "South_Africa", "Sri_Lanka",
"Switzerland", "Syrian_Arab_Republic", "Zambia",
"Algeria", "Argentina", "Bolivia_(Plurinational_State_of)", "Brazil", "Cambodia", "Chile", "China,_mainland",
"Colombia", "Costa_Rica", "Dominican_Republic", "Ecuador", "Egypt", "Guatemala", "Guinea", "Honduras",
"India", "Indonesia", "Jordan", "Kenya", "Lao_People's_Democratic_Republic", "Lebanon", "Madagascar",
"Malawi", "Malaysia", "Mauritania", "Mexico", "Myanmar", "Nicaragua", "Paraguay", "Philippines", "United_Republic_of_Tanzania",
"Thailand", "Togo", "Turkey","United_States_of_America", "Uruguay", "Venezuela_(Bolivarian_Republic_of)",
"Viet_Nam","Zimbabwe")
# Tag rows belonging to each year's IFA country selection. Vectorized
# replacement of the four original row loops (identical result: "IFA" for
# members, NA otherwise).
data_EU_00$country <- as.character(data_EU_00$country)
data_EU_06$country <- as.character(data_EU_06$country)
data_EU_10$country <- as.character(data_EU_10$country)
data_EU_14$country <- as.character(data_EU_14$country)
data_EU_00$IFA_Region <- NA
data_EU_06$IFA_Region <- NA
data_EU_10$IFA_Region <- NA
data_EU_14$IFA_Region <- NA
data_EU_00$IFA_Region[data_EU_00$country %in% IFA_00] <- "IFA"
data_EU_06$IFA_Region[data_EU_06$country %in% IFA_06] <- "IFA"
data_EU_10$IFA_Region[data_EU_10$country %in% IFA_10] <- "IFA"
data_EU_14$IFA_Region[data_EU_14$country %in% IFA_14] <- "IFA"
# Pool all years and keep the IFA rows only.
data_EU_IFA <- rbind(data_EU_00, data_EU_06, data_EU_10, data_EU_14)
data_EU_IFA_sel <- data_EU_IFA[which(data_EU_IFA$IFA_Region == "IFA"), ]
data_EU_IFA_sel$country <- as.character(data_EU_IFA_sel$country)
write.csv(data_EU_IFA_sel, file = "data_EU_IFA_sel.csv")
data <- read.csv("data_EU_IFA_sel.csv")[, -1]
# Per-country summary table: one row per country in the pooled IFA data.
data_mean <- data.frame("country" = unique(data$country))
# cropsuit and market are time-invariant within a country, so the first
# matching row suffices. match() replaces the two original row loops, which
# (a) triggered length-mismatch warnings by assigning a multi-year vector to
# a single cell and (b) contained a bug: the market loop's else-branch reset
# data_mean$cropsuit instead of data_mean$market.
first_row <- match(data_mean$country, data$country)
data_mean$cropsuit <- data$cropsuit[first_row]
data_mean$market <- data$market[first_row]
# Mean nitrogen input (t/ha) per country across all available years.
# tapply produces one mean per country; indexing by name aligns the result to
# data_mean$country. Replaces ~68 copy-pasted per-country assignments with the
# identical data-driven computation (and covers any country missing from the
# hand-maintained list).
data_mean$Nt_area <- as.numeric(
  tapply(data$Nt_area, as.character(data$country), mean)[as.character(data_mean$country)]
)
# Mean yield (kJ/ha) per country across all available years; same data-driven
# replacement of the per-country copy-paste block as for Nt_area.
data_mean$yield <- as.numeric(
  tapply(data$yield, as.character(data$country), mean)[as.character(data_mean$country)]
)
# Mean manure N (kg/ha) per country across all available years; computed for
# every country at once. Any remaining per-country assignments further down
# the script recompute the same values and are therefore harmless.
data_mean$manure_area <- as.numeric(
  tapply(data$manure_area, as.character(data$country), mean)[as.character(data_mean$country)]
)
data_mean$manure_area[which(data_mean$country=="New_Zealand")] <- mean(data$manure_area[which(data$country=="New_Zealand")])
data_mean$manure_area[which(data_mean$country=="Nicaragua")] <- mean(data$manure_area[which(data$country=="Nicaragua")])
data_mean$manure_area[which(data_mean$country=="Nigeria")] <- mean(data$manure_area[which(data$country=="Nigeria")])
data_mean$manure_area[which(data_mean$country=="Norway")] <- mean(data$manure_area[which(data$country=="Norway")])
data_mean$manure_area[which(data_mean$country=="Pakistan")] <- mean(data$manure_area[which(data$country=="Pakistan")])
data_mean$manure_area[which(data_mean$country=="Paraguay")] <- mean(data$manure_area[which(data$country=="Paraguay")])
data_mean$manure_area[which(data_mean$country=="Philippines")] <- mean(data$manure_area[which(data$country=="Philippines")])
data_mean$manure_area[which(data_mean$country=="Republic_of_Korea")] <- mean(data$manure_area[which(data$country=="Republic_of_Korea")])
data_mean$manure_area[which(data_mean$country=="Republic_of_Moldova")] <- mean(data$manure_area[which(data$country=="Republic_of_Moldova")])
data_mean$manure_area[which(data_mean$country=="Saudi_Arabia")] <- mean(data$manure_area[which(data$country=="Saudi_Arabia")])
data_mean$manure_area[which(data_mean$country=="South_Africa")] <- mean(data$manure_area[which(data$country=="South_Africa")])
data_mean$manure_area[which(data_mean$country=="Sri_Lanka")] <- mean(data$manure_area[which(data$country=="Sri_Lanka")])
data_mean$manure_area[which(data_mean$country=="Switzerland")] <- mean(data$manure_area[which(data$country=="Switzerland")])
data_mean$manure_area[which(data_mean$country=="Syrian_Arab_Republic")] <- mean(data$manure_area[which(data$country=="Syrian_Arab_Republic")])
data_mean$manure_area[which(data_mean$country=="Thailand")] <- mean(data$manure_area[which(data$country=="Thailand")])
data_mean$manure_area[which(data_mean$country=="Togo")] <- mean(data$manure_area[which(data$country=="Togo")])
data_mean$manure_area[which(data_mean$country=="Turkey")] <- mean(data$manure_area[which(data$country=="Turkey")])
data_mean$manure_area[which(data_mean$country=="United_Republic_of_Tanzania")] <- mean(data$manure_area[which(data$country=="United_Republic_of_Tanzania")])
data_mean$manure_area[which(data_mean$country=="United_States_of_America")] <- mean(data$manure_area[which(data$country=="United_States_of_America")])
data_mean$manure_area[which(data_mean$country=="Uruguay")] <- mean(data$manure_area[which(data$country=="Uruguay")])
data_mean$manure_area[which(data_mean$country=="Venezuela_(Bolivarian_Republic_of)")] <- mean(data$manure_area[which(data$country=="Venezuela_(Bolivarian_Republic_of)")])
data_mean$manure_area[which(data_mean$country=="Viet_Nam")] <- mean(data$manure_area[which(data$country=="Viet_Nam")])
data_mean$manure_area[which(data_mean$country=="Zambia")] <- mean(data$manure_area[which(data$country=="Zambia")])
data_mean$manure_area[which(data_mean$country=="Zimbabwe")] <- mean(data$manure_area[which(data$country=="Zimbabwe")])
data_mean$manure_area[which(data_mean$country=="EU27")] <- mean(data$manure_area[which(data$country=="EU27")])
data_mean$manure_area[which(data_mean$country=="Iran_(Islamic_Republic_of)")] <- mean(data$manure_area[which(data$country=="Iran_(Islamic_Republic_of)")])
data_mean$manure_area[which(data_mean$country=="Russian_Federation")] <- mean(data$manure_area[which(data$country=="Russian_Federation")])
data_mean$manure_area[which(data_mean$country=="Ukraine")] <- mean(data$manure_area[which(data$country=="Ukraine")])
data_mean$manure_area[which(data_mean$country=="Uzbekistan")] <- mean(data$manure_area[which(data$country=="Uzbekistan")])
data_mean$manure_area[which(data_mean$country=="Belarus")] <- mean(data$manure_area[which(data$country=="Belarus")])
# Per-country mean of netcap14_area, mirroring the manure_area logic above:
# initialize the column to NA, then overwrite each listed country's rows with
# that country's mean from `data`. Unlisted countries stay NA.
# NOTE(review): mean() without na.rm = TRUE propagates NAs — confirm intended.
data_mean$netcap14_area <- NA
netcap14_countries <- c(
  "Albania", "Algeria", "Argentina", "Australia",
  "Azerbaijan", "Bangladesh", "Bolivia_(Plurinational_State_of)", "Brazil",
  "Cambodia", "Canada", "Chile", "China,_mainland", "Colombia", "Costa_Rica",
  "Dominican_Republic", "Ecuador", "Egypt", "El_Salvador", "Ethiopia",
  "Guatemala", "Guinea", "Honduras", "India", "Indonesia", "Israel", "Japan",
  "Jordan", "Kenya", "Kuwait", "Lao_People's_Democratic_Republic", "Lebanon",
  "Madagascar", "Malawi", "Malaysia", "Mauritania", "Mexico", "Morocco",
  "Myanmar", "New_Zealand", "Nicaragua", "Nigeria", "Norway", "Pakistan",
  "Paraguay", "Philippines", "Republic_of_Korea", "Republic_of_Moldova",
  "Saudi_Arabia", "South_Africa", "Sri_Lanka", "Switzerland",
  "Syrian_Arab_Republic", "Thailand", "Togo", "Turkey",
  "United_Republic_of_Tanzania", "United_States_of_America", "Uruguay",
  "Venezuela_(Bolivarian_Republic_of)", "Viet_Nam", "Zambia", "Zimbabwe",
  "EU27", "Iran_(Islamic_Republic_of)", "Russian_Federation", "Ukraine",
  "Uzbekistan", "Belarus"
)
for (cn in netcap14_countries) {
  data_mean$netcap14_area[which(data_mean$country == cn)] <-
    mean(data$netcap14_area[which(data$country == cn)])
}
# Frequency table of rows per country — presumably plyr::count; verify the
# package is loaded earlier in the script. Diagnostic printout only.
count(data$country)

# Per-country weight, stored as a character column exactly as in the original
# script ("1".."4"). Countries not in the lookup keep NA.
# NOTE(review): the meaning of the 1..4 weight classes is not documented in
# this script — confirm with the data owner.
data_mean$weight <- NA
country_weight <- c(
  "Albania" = "1", "Algeria" = "1", "Argentina" = "4", "Australia" = "4",
  "Azerbaijan" = "1", "Bangladesh" = "4",
  "Bolivia_(Plurinational_State_of)" = "1", "Brazil" = "4",
  "Cambodia" = "1", "Canada" = "4", "Chile" = "4", "China,_mainland" = "4",
  "Colombia" = "1", "Costa_Rica" = "1", "Dominican_Republic" = "1",
  "Ecuador" = "1", "Egypt" = "4", "El_Salvador" = "1", "Ethiopia" = "1",
  "Guatemala" = "1", "Guinea" = "1", "Honduras" = "1", "India" = "4",
  "Indonesia" = "4", "Israel" = "3", "Japan" = "4", "Jordan" = "1",
  "Kenya" = "1", "Kuwait" = "1", "Lao_People's_Democratic_Republic" = "1",
  "Lebanon" = "1", "Madagascar" = "1", "Malawi" = "1", "Malaysia" = "4",
  "Mauritania" = "1", "Mexico" = "4", "Morocco" = "4", "Myanmar" = "1",
  "New_Zealand" = "2", "Nicaragua" = "1", "Nigeria" = "1", "Norway" = "1",
  "Pakistan" = "4", "Paraguay" = "1", "Philippines" = "4",
  "Republic_of_Korea" = "1", "Republic_of_Moldova" = "1",
  "Saudi_Arabia" = "1", "South_Africa" = "4", "Sri_Lanka" = "1",
  "Switzerland" = "1", "Syrian_Arab_Republic" = "1", "Thailand" = "4",
  "Togo" = "1", "Turkey" = "4", "United_Republic_of_Tanzania" = "1",
  "United_States_of_America" = "4", "Uruguay" = "1",
  "Venezuela_(Bolivarian_Republic_of)" = "1", "Viet_Nam" = "4",
  "Zambia" = "1", "Zimbabwe" = "1", "EU27" = "4",
  "Iran_(Islamic_Republic_of)" = "3", "Russian_Federation" = "3",
  "Ukraine" = "3", "Uzbekistan" = "3", "Belarus" = "2"
)
# match() replaces the original one-which()-per-country assignments;
# rows whose country is not in the lookup are left untouched (NA).
idx <- match(data_mean$country, names(country_weight))
assigned <- !is.na(idx)
data_mean$weight[assigned] <- unname(country_weight[idx[assigned]])

write.csv(data_mean, file = "data_mean.csv")
|
3c9a876d184d68e21f5edfeb97793653fae0c565
|
c988e9ecfaac555bd13f5b83ae30a04d30253ad9
|
/Transcriptomic_analysis/createDesignMatrices.R
|
f23c75d89dca139d4bc31eb3716426dd241ffc61
|
[] |
no_license
|
marcoGarranzo206/TFM_UAM
|
fc0e026ce72823ebb4f0810c7e8434141e807e80
|
79e75c5afc4c843ad53ab049b870c26f8bb124c6
|
refs/heads/master
| 2023-08-11T20:27:35.698405
| 2021-09-16T22:18:06
| 2021-09-16T22:18:06
| 292,009,620
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,897
|
r
|
createDesignMatrices.R
|
# each design has its own function
# lots of code repetition for the special cases
# generateSpecialDesign.R has one for all the special types
# metaDataFile = "~/Marco_TFM/Marco_TFM/Data/metaData/GSE111073sample_info.txt"
# isPaired = T
# intercept = F
library(stringr)
generateFormula <- function(dfName = "", varnames, intercept = FALSE){
  # Build a one-sided model formula of the form "~0+df$a + df$b"
  # (or "~1+..." when an intercept is wanted).
  #
  # dfName:    name of the data frame each variable is prefixed with
  #            (callers pass the name of a globally assigned data frame).
  # varnames:  character vector of column names to include as terms.
  # intercept: keep the intercept (TRUE -> "~1+", FALSE -> "~0+").
  #
  # Returns a formula object. Its environment is this call's frame, whose
  # parent chain reaches the global environment — model.matrix() relies on
  # that to resolve the globally assigned data frame (e.g. "varDataG").
  prefix <- if (intercept) "~1+" else "~0+"
  # paste(collapse) replaces the original manual first-iteration /
  # accumulator loop; the resulting string is identical.
  terms <- paste0(dfName, "$", varnames)
  as.formula(paste0(prefix, paste(terms, collapse = " + ")))
}
generateStandardDesignMatrix <- function(meta, isPaired = FALSE, intercept = FALSE, ...){
  # Design + contrast matrices for "simple" studies: each compound has a
  # single dose/time point, and any additional categorical covariates are
  # included in the design so limma can remove their effect.
  #
  # meta:      path to a metadata table readable by read.table(), or a
  #            data.frame already in memory.
  # isPaired:  if TRUE (or an "Id" column exists), "Id" is treated as a
  #            factor identifying paired samples.
  # intercept: forwarded to generateFormula() ("~1+..." vs "~0+...").
  # ...:       forwarded to read.table() when `meta` is a file path.
  #
  # Returns a list with $designMatrix and $contMatrix (one contrast per
  # non-control treatment, against "treatmentcontrol"), or NULL when `meta`
  # is neither a path nor a data.frame.
  # Side effect: assigns `varDataG` into the global environment, because
  # model.matrix() resolves the formula's variables there.
  if (is.character(meta) && length(meta) == 1) {
    varData <- read.table(meta, header = TRUE, ...)
  } else if (is.data.frame(meta)) {
    varData <- meta
  } else {
    print("meta must be a file path or data frame")
    # bug fix: the original bare `return` did not exit the function, so
    # execution fell through and errored on the undefined varData
    return(NULL)
  }
  if (isPaired || "Id" %in% colnames(varData)) {
    varData$Id <- as.factor(varData$Id)
  }
  # Move the treatment column to the front so it comes first in the formula.
  # NOTE(review): grep() may match several columns; this assumes exactly one
  # column name contains "treatment".
  n <- grep("treatment", colnames(varData))
  if (n != 1) {
    if (n == ncol(varData)) {
      varData <- varData[ , rev(colnames(varData))]
    } else if (n == (ncol(varData) - 1)) {
      varData <- data.frame(varData[, n, drop = FALSE], varData[, 1:(n - 1), drop = FALSE], varData[, n + 1, drop = FALSE])
    } else {
      # bug fix: was `n+1:ncol(varData)`, which parses as n + (1:ncol) and
      # indexes past the last column ("undefined columns selected")
      varData <- data.frame(varData[, n, drop = FALSE], varData[, 1:(n - 1), drop = FALSE], varData[, (n + 1):ncol(varData), drop = FALSE])
    }
  }
  # model.matrix() looks variables up via the formula's environment chain,
  # so the data must be reachable from the global environment.
  assign("varDataG", varData, envir = .GlobalEnv)
  designMatrix <- model.matrix(generateFormula("varDataG", colnames(varData), intercept = intercept))
  rownames(designMatrix) <- rownames(varData)
  # strip the "varDataG$" prefix that model.matrix puts in the column names
  colnames(designMatrix) <- vapply(strsplit(colnames(designMatrix), "\\$"), function(x) make.names(x[2]), FUN.VALUE = character(1))
  # One contrast column per non-control treatment: +1 treatment, -1 control.
  treatmentNames <- colnames(designMatrix)[grep("^treatment.*", colnames(designMatrix), ignore.case = TRUE)]
  t <- treatmentNames[treatmentNames != "treatmentcontrol"]
  contMatrix <- matrix(0, nrow = ncol(designMatrix), ncol = length(t), dimnames = list(colnames(designMatrix), t))
  contMatrix["treatmentcontrol", ] <- -1
  for (i in t) {
    contMatrix[i, i] <- 1
  }
  ret <- list()
  ret$designMatrix <- designMatrix
  ret$contMatrix <- contMatrix
  return(ret)
}
# Design/contrast matrices for studies with several time points where EACH
# time point has its own control group (there is no shared time point 0).
#
# metaDataFile: path to a sample metadata table with a "treatment" column
#   (containing "control" rows) and the time-point column named by `varName`.
# isPaired: if TRUE (or an "Id" column exists), "Id" becomes a pairing factor.
# intercept: accepted but not used below — the design is always built with
#   intercept = F. NOTE(review): confirm this is intentional.
# varName: name of the time-point column in the metadata file.
# ...: forwarded to read.table().
#
# Returns a list with $designMatrix and $contMatrix; each contrast compares a
# treatment at a time point against the control at that SAME time point.
# Side effect: assigns `newVarDataG` into the global environment so that
# model.matrix() can resolve the formula built by generateFormula().
generateDesignTimePoint <- function(metaDataFile, isPaired = F, intercept = F, varName= "time point",...){
## diff time points
## no 0 time point
## each time point has its own control
varData <- read.table(metaDataFile, header = TRUE,...)
# make.names() every value so levels are syntactically valid R names
# (e.g. numeric time points gain an "X" prefix)
varData <- data.frame(vapply(X=varData, FUN = function(x) make.names(x), FUN.VALUE = character(nrow(varData)))
,row.names = row.names(varData))
varName <- make.names(varName)
# dots in treatment names would collide with the "." separator used to join
# treatment and time point below, so replace them with underscores
varData$treatment <- str_replace_all(varData$treatment, pattern = "\\.", "_")
if(isPaired | "Id" %in% colnames(varData)){
varData$Id <- as.factor(varData$Id)
}
# combined factor "<treatment>.<timepoint>", e.g. "drugA.X24"
treatmentVar <- paste(varData$treatment, varData[ , varName], sep = ".")
newVarData <- data.frame( treatment = treatmentVar, varData[,!colnames(varData) %in% c("treatment", varName), drop = F],
stringsAsFactors = F)
# model.matrix() resolves names through the formula environment, hence the
# assignment into the global environment
assign("newVarDataG", newVarData, envir = .GlobalEnv)
designMatrix <- model.matrix( generateFormula("newVarDataG", colnames(newVarData), intercept = F) )
# strip the "newVarDataG$" prefix left in the column names by model.matrix
colnames(designMatrix) <- vapply(strsplit(colnames(designMatrix), "\\$" ), function(x) make.names(x[2]), FUN.VALUE = character(1))
rownames(designMatrix) <- rownames(newVarData)
# one contrast per (treatment, time point): treatment_tp - control_tp
treatment <- unique(varData$treatment[varData$treatment != "control"])
timepoints <- unique( varData[, varName])
contMatrix <- matrix( data = 0, nrow = ncol(designMatrix), ncol = length(treatment)*length(timepoints))
colnames(contMatrix) <- paste(rep(treatment, each = length(timepoints)), timepoints,sep = ".")
rownames(contMatrix) <- colnames(designMatrix)
for(co in colnames(contMatrix)){
# column name is "<treatment>.<timepoint>"; the time point itself may contain
# dots (from make.names), but the treatment cannot — dots were replaced above,
# so everything after the first "." is the time point
col_split <- strsplit(co, "\\.")[[1]]
tp <- paste(col_split[2:length(col_split)], collapse = ".")
control <- paste("treatmentcontrol", tp, sep = ".")
treat <- paste("treatment", co, sep = "")
contMatrix[treat, co] <- 1
contMatrix[control, co] <- -1
}
ret <-list()
ret$designMatrix <- designMatrix
ret$contMatrix <- contMatrix
return(ret)
}
# Design/contrast matrices for studies where level "0" of `varName` (time
# point or concentration 0, coded "X0" after make.names) is the control.
#
# Two layouts are handled:
#   * an explicit "control" treatment exists -> every (compound, level) is
#     contrasted against the single shared "treatmentcontrol.X0" column;
#   * no "control" treatment -> each compound is contrasted against its own
#     "X0" level ("<compound>.X0").
#
# metaDataFile: path to a sample metadata table with a "treatment" column and
#   the dose/time column named by `varName`.
# isPaired: if TRUE (or an "Id" column exists), "Id" becomes a pairing factor.
# varName: name of the dose/time column.
# ...: forwarded to read.table().
#
# Returns a list with $designMatrix and $contMatrix.
# Side effect: assigns `newVarDataG` into the global environment so that
# model.matrix() can resolve the formula built by generateFormula().
generateDesign0asControl <- function(metaDataFile, isPaired = F, varName = "timepoint",...){
#diff time points/conc, 0 is control
#control can be shared across treatments or not
varData <- read.table(metaDataFile, header = TRUE, ...)
# make.names() every value so levels are syntactically valid R names
# (numeric levels like "0" become "X0")
varData <- data.frame(vapply(X=varData, FUN = function(x) make.names(x), FUN.VALUE = character(nrow(varData)))
,row.names = row.names(varData))
# dots inside treatment names would collide with the "." separator used
# below, so turn them into underscores
varData$treatment <- str_replace_all(varData$treatment, pattern = "\\.", "_")
varName <- make.names(varName)
if(isPaired | "Id" %in% colnames(varData)){
varData$Id <- as.factor(varData$Id)
}
# combined factor "<treatment>.<level>", e.g. "drugA.X10"
treatmentVar <- paste(varData$treatment, varData[ , varName], sep = ".")
newVarData <- data.frame( treatment = treatmentVar, varData[,!colnames(varData) %in% c("treatment", varName), drop = F],
stringsAsFactors = F)
assign("newVarDataG", newVarData, envir = .GlobalEnv)
designMatrix <- model.matrix( generateFormula("newVarDataG", colnames(newVarData), intercept = F) )
# strip the "newVarDataG$" prefix left by model.matrix
colnames(designMatrix) <- vapply(strsplit(colnames(designMatrix), "\\$" ), function(x) make.names(x[2]), FUN.VALUE = character(1))
rownames(designMatrix) <- rownames(newVarData)
treatmentCompounds <- unique(varData$treatment)
print(treatmentCompounds)
print("control" %in% treatmentCompounds)
if("control" %in% treatmentCompounds){
# shared control: contrast every non-X0 (compound, level) against control.X0
treatmentCompounds <- treatmentCompounds[treatmentCompounds != "control"]
treatmentVars <- unique(varData[, varName][varData[, varName] != "X0"])
contMatrix <- matrix( data = 0, nrow = ncol(designMatrix), ncol = length(treatmentCompounds)*length(treatmentVars))
colnames(contMatrix) <- paste(rep(treatmentCompounds, each = length(treatmentVars)), treatmentVars,sep = ".")
rownames(contMatrix) <- colnames(designMatrix) #already has
contMatrix["treatmentcontrol.X0", ] <- -1
for(co in colnames(contMatrix)){
treat <- paste("treatment", co, sep = "")
contMatrix[treat, co] <- 1
}
}else{
#diff conc for each compound: redo for everything!
# one contrast per non-X0 (compound, level); the baseline is the compound's
# own X0 column. NOTE(review): the ncol computation assumes every compound
# has exactly one X0 level — verify against the metadata.
contMatrix <- matrix( data = 0, nrow = ncol(designMatrix), ncol = length(unique(treatmentVar)) - length(treatmentCompounds))
colnames(contMatrix) <- unique(treatmentVar[-grep(pattern = "X0$", x = treatmentVar)])
rownames(contMatrix) <- colnames(designMatrix) #already has
for(co in colnames(contMatrix)){
# "<compound>.<level>" -> baseline column "treatment<compound>.X0"
control <- sub("\\..*", ".X0", co)
control <- paste("treatment", control, sep = "")
treat <- paste("treatment", co, sep = "")
contMatrix[treat, co] <- 1
contMatrix[control, co] <- -1
}
}
ret <-list()
ret$designMatrix <- designMatrix
ret$contMatrix <- contMatrix
return(ret)
}
# Design/contrast matrices for before/after (placebo-controlled) studies.
#
# Each contrast is a difference-in-differences:
#   (treatment_tp - treatment_X0) - (control_tp - control_X0)
# where "X0" is the baseline ("before") time point and "control" is the
# placebo arm. If a condition lacks its own X0 samples, the treatment_X0 term
# is simply omitted (see the comment inside the loop).
#
# metaDataFile: path to a sample metadata table with a "treatment" column
#   (containing "control" rows) and the time-point column named by `varName`.
# isPaired: if TRUE (or an "Id" column exists), "Id" becomes a pairing factor.
# varName: name of the time-point column.
# ...: forwarded to read.table().
#
# Returns a list with $designMatrix and $contMatrix.
# Side effect: assigns `newVarDataG` into the global environment so that
# model.matrix() can resolve the formula built by generateFormula().
generateDesignBeforeAfter <- function(metaDataFile, isPaired = F, varName = "time point", ...){
#before after studies
varData <- read.table(metaDataFile, header = TRUE, ...)
# make.names() every value so levels are syntactically valid R names
# (numeric time points gain an "X" prefix, e.g. "0" -> "X0")
varData <- data.frame(vapply(X=varData, FUN = function(x) make.names(x), FUN.VALUE = character(nrow(varData)))
,row.names = row.names(varData), stringsAsFactors = F)
#in case some treatment has a dot due to bad naming, change to _, otherwise
#it would interfere with value assignment in contMatrix
varData$treatment <- str_replace_all(varData$treatment, pattern = "\\.", "_")
varName <- make.names(varName)
if(isPaired | "Id" %in% colnames(varData)){
varData$Id <- as.factor(varData$Id)
}
# combined factor "<treatment>.<timepoint>", e.g. "drugA.X24"
treatmentVar <- paste(varData$treatment, varData[ , varName], sep = ".")
newVarData <- data.frame( treatment = treatmentVar, varData[,!colnames(varData) %in% c("treatment", varName), drop = F],
stringsAsFactors = F)
assign("newVarDataG", newVarData, envir = .GlobalEnv)
designMatrix <- model.matrix( generateFormula("newVarDataG", colnames(newVarData), intercept = F) )
# strip the "newVarDataG$" prefix left by model.matrix
colnames(designMatrix) <- vapply(strsplit(colnames(designMatrix), "\\$" ), function(x) make.names(x[2]), FUN.VALUE = character(1))
rownames(designMatrix) <- rownames(newVarData)
# conditions to contrast: every non-control, non-baseline (X0) combination
treatment <- unique(newVarData$treatment)
treatment <- treatment[-c(grep(pattern = "^control", x = treatment), grep(pattern = "X0$", x = treatment))]
timepoints <- unique( varData[, varName])
timepoints <- timepoints[which(timepoints != "X0")]
contMatrix <- matrix( data = 0, nrow = ncol(designMatrix), ncol = length(treatment))
colnames(contMatrix) <- treatment
rownames(contMatrix) <- colnames(designMatrix)
contMatrix["treatmentcontrol.X0", ] <- 1 #here control means placebo
for(co in colnames(contMatrix)){
# column name is "<treatment>.<timepoint>"; treatment cannot contain dots
# (replaced above), so everything after the first "." is the time point
col_split <- strsplit(co, "\\.")[[1]]
tr <- col_split[1]
tp <- paste(col_split[2:length(col_split)], collapse = ".")
control <- paste("treatment", tr, ".X0", sep = "") #here control refers to time point 0
case <- paste("treatment", co, sep = "")
contMatrix[case, co] <- 1
if(control %in% row.names(contMatrix)){
#sometimes we dont have a time point 0 for a given condition
#which os not ideal
#but here we just treat those case as if it were a diff time point
#study, with the benefit that control has its time
# effect 0 subtracted
contMatrix[control, co] <- -1
}
# subtract the placebo arm at the same time point
contMatrix[paste("treatmentcontrol", tp, sep = "."), co] <- -1
}
ret <-list()
ret$designMatrix <- designMatrix
ret$contMatrix <- contMatrix
return(ret)
}
# Build a treatment-vs-control contrast matrix directly from a design matrix.
#
# designMatrix: design matrix whose treatment columns are named
#   "treatment<varsep><condition>", with an optional control column named
#   "treatment<varsep>control".
# varsep: separator between "treatment" and the condition name.
#
# Returns a list with the unchanged $designMatrix and the derived
# $contMatrix: one column per non-control treatment (+1 on the treatment
# row, -1 on the control row when a control column exists).
contMatrixFromDesign <- function(designMatrix, varsep = "_"){
  design_cols <- colnames(designMatrix)
  treatment_prefix <- paste("treatment", varsep, sep = "")
  treatment_cols <- design_cols[grepl(paste("^", treatment_prefix, sep = ""), design_cols)]
  # every treatment column except the control one gets its own contrast
  conditions <- treatment_cols[!grepl("control$", treatment_cols)]
  contrasts <- matrix(0, nrow = ncol(designMatrix), ncol = length(conditions),
                      dimnames = list(design_cols, conditions))
  control_col <- paste(treatment_prefix, "control", sep = "")
  if (control_col %in% design_cols) {
    contrasts[control_col, ] <- -1
  }
  for (cond in conditions) {
    contrasts[cond, cond] <- 1
  }
  list(designMatrix = designMatrix, contMatrix = contrasts)
}
|
9039bd969f7fa91e7f7b67a3f326e35a031857ff
|
642fbb157e400bcc4811b0a5c14b8255e16b5db8
|
/R/aggregate_rating.R
|
cc4424775cd23f544947514497e03ae6bd4aeb09
|
[] |
no_license
|
lucasgautheron/ChildRecordsR
|
6f048655f281465b298c017ac2d56d996a2ab6f0
|
afd1e7861095583664c47678b1fe46c3555ac33d
|
refs/heads/main
| 2023-04-18T19:44:49.981733
| 2021-04-02T16:57:00
| 2021-04-02T16:57:00
| 359,896,932
| 0
| 0
| null | 2021-04-20T17:22:02
| 2021-04-20T17:22:01
| null |
UTF-8
|
R
| false
| false
| 3,753
|
r
|
aggregate_rating.R
|
#' Aggregation of annotation data
#'
#' Based on the result of a find.rating.segment return (or a similar data.frame),
#' the function will extract data from annotation files into a raterData class.
#'
#' The data will be organized in a raw annotation format and a long segmented format.
#'
#' @param ChildRecordings : a ChildRecordings class
#' @param data : find.rating.segment return or similar data.frame
#' @param cut : time size in millisecond for the unit segment
#'
#' @return A raterData class containing the original format and long format for every annotator.
#'
#' @examples
#' library(ChildRecordsR)
#' path = "/mnt/94707AA4707A8CAC/CNRS/corpus/vandam-daylong-demo"
#' CR = ChildRecordings(path)
#'
#' # if no time window is specified, this function will only return a table for all the known raters
#' # Every rater needs to have rated each segment found
#' search = find.rating.segment(CR, "BN32_010007.mp3")
#' rez = aggregate.rating(search, CR, cut=100, verbose=T)
#'
aggregate.rating <- function(data, ChildRecordings, cut=100,verbose=T){
  # Guard: all downstream accessors assume a ChildRecordings corpus object.
  if(!is(ChildRecordings, "ChildRecordings")){
    print(paste( substitute(ChildRecordings), "is not a ChildRecordings class"))
    return(NULL)
  }
  # attach(data)
  # Stable ordering so segments are processed file-by-file in time order.
  data <- data[order(data$recording_filename,data$set,data$true_onset),]
  # detach(data)
  all.meta <- ChildRecordings$all.meta
  ratersID <- as.character(unique(data$set))
  rater <- list()
  ### init progress bar
  start <- Sys.time()
  Nrow <- 1
  # One pass per annotator set ("rater"): accumulate its raw annotations and
  # the fixed-size ("long") segments cut from them.
  for(rat in ratersID){
    tmp.data <- data[data$set==rat,]
    raw_files <- data.frame()
    long_files <- data.frame()
    for (row in 1:nrow(tmp.data)){
      # Note: the integer loop index is immediately shadowed by the row it selects.
      row <- tmp.data[row,]
      annotation_filename <- row$annotation_filename
      true_onset <- row$true_onset
      true_offset <- row$true_offset
      meta.row <- all.meta[all.meta$annotation_filename==annotation_filename & all.meta$set==rat,]
      # Load the annotation file, cut it into `cut`-ms segments over the rated
      # window, and one-hot encode the segment labels (project helpers).
      raw_file <- file.opener(meta.row,ChildRecordings)
      long_file <- convertor_long_cut(raw_file,true_onset,true_offset,cut=cut)
      long_file <- data_to_OneHotEnc(long_file)
      # Keep only raw annotations fully inside the rated window.
      raw_files<-rbind(raw_files,raw_file[raw_file$segment_onset>=true_onset & raw_file$segment_offset<=true_offset,])
      long_files <- rbind(long_files,long_file)
      ### Progress bar
      if(verbose){
        t <- Sys.time()
        extra <- nchar('||100%')
        # NOTE(review): options(width = 80) *sets* the option and returns the
        # previous value; getOption("width") was probably intended - confirm.
        width <- options(width = 80)$width
        step <- round(Nrow / nrow(data) * (width - extra))
        step.time <- as.numeric(difftime(t, start, units = "secs")/Nrow)
        est.duration = step.time*nrow(data)/60
        est.remain=step.time*(nrow(data)-Nrow)/60
        text <- sprintf('|%s%s|% 3s%% time by step : %ss estimate duration : %sm remain : %sm', strrep('=', step),
                        strrep(' ', width - step - extra), round(Nrow / nrow(data) * 100),
                        round(step.time) ,
                        round(est.duration),
                        round(est.remain))
        cat(text,"\n")
        # Nrow counts processed rows; only advanced (and used) when verbose.
        Nrow = Nrow + 1
      }
      ###
    }
    rater[[rat]]$raw_file <- raw_files
    rater[[rat]]$long_file <- long_files
  }
  # Bundle results plus the call arguments, tag as "raterData", print the
  # summary, and return the object invisibly.
  value <- list(
    rater= rater,
    args = list(ratersID = ratersID,
                cut = cut,
                search = data)
  )
  attr(value, "class") <- "raterData"
  print.raterData(value)
  invisible(value)
}
print.raterData <- function(raterData){
  # Summarise a rating session: number of annotators and how much recording
  # time each one rated (total window length split evenly across raters).
  search_tbl <- raterData$args$search
  total_ms <- sum(search_tbl$true_offset - search_tbl$true_onset)
  n_raters <- length(raterData$args$ratersID)
  per_rater_ms <- total_ms / n_raters
  cat("number of annotators", n_raters, "\n")
  cat("length of recording annotation for each annotator ", per_rater_ms,
      "ms or ", per_rater_ms / 3600000, "hours\n\n")
}
|
b14e4fffe6d57089b5cd25896405bdaea56c56e2
|
c1672f9c39b9b74b7fd43c90e36557f37db0e009
|
/R/punk_randoms.R
|
53f1439b0dc21bc709e25c7357e941f0f231b351
|
[
"MIT"
] |
permissive
|
ThinkR-open/punkapi
|
a6fcac96e3be9f406b056d9cddc9347940c1dd8e
|
28ad8004d895e7565fb94ce4fd3e7b5d3f8a6e95
|
refs/heads/main
| 2023-01-04T08:50:51.360832
| 2020-10-30T08:14:38
| 2020-10-30T08:14:38
| 307,626,787
| 0
| 0
|
NOASSERTION
| 2020-10-29T15:56:12
| 2020-10-27T08:01:28
|
R
|
UTF-8
|
R
| false
| false
| 664
|
r
|
punk_randoms.R
|
#' Get a Random Beer
#'
#' @return a dataframe with plenty of information about a random famous beer
#'
#' @param n number of element to return
#'
#' @export
#' @importFrom httr GET content
#' @importFrom purrr map_df
#'
#' @rdname punk_random
#'
#' @examples
#' punk_random()
#' punk_randoms(7)
punk_random <- function() {
  # Fetch one random beer from the Punk API and parse the first record of the
  # JSON response into a data frame via the package's parse_result() helper.
  response <- GET("https://api.punkapi.com/v2/beers/random")
  check_results(response)
  payload <- content(response)[[1]]
  parse_result(payload)
}
# Rate-limited variant of punk_random(): purrr::slowly() inserts a one-second
# delay before each call so repeated requests do not hammer the API.
slow_punk_random <- purrr::slowly(
  punk_random,
  rate = purrr::rate_delay(1)
)
#' @export
#' @rdname punk_random
punk_randoms <- function(n = 5) {
  # Fetch `n` random beers, one rate-limited request per row, bound into a
  # single data frame.
  # Fix: the previous version iterated over a hard-coded 1:5 and ignored `n`.
  map_df(
    seq_len(n),
    ~ slow_punk_random()
  )
}
|
739ddc143debd7dca77c25b98e0e896cecc3ca26
|
d261b279eb86bec7fb15925bc50e1f9465ce369c
|
/man/whitespace_tokenize.Rd
|
1f282aec734fab9b09aaf899d5d4e7dedb21c4cb
|
[
"Apache-2.0"
] |
permissive
|
jonathanbratt/RBERT
|
e3ac2c7169b203552c20ed7369fb4e6041b7ab5c
|
d32c3a7b2cce0ce4fb93f64eae9e3f7e85cc6158
|
refs/heads/master
| 2023-02-06T14:25:06.342274
| 2023-01-25T17:05:49
| 2023-01-25T17:05:49
| 204,560,591
| 144
| 21
|
Apache-2.0
| 2023-01-25T17:05:52
| 2019-08-26T20:58:56
|
R
|
UTF-8
|
R
| false
| true
| 507
|
rd
|
whitespace_tokenize.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tokenization.R
\name{whitespace_tokenize}
\alias{whitespace_tokenize}
\title{Run basic whitespace cleaning and splitting on a piece of text.}
\usage{
whitespace_tokenize(text)
}
\arguments{
\item{text}{Character scalar to tokenize.}
}
\value{
Character vector of tokens.
}
\description{
Run basic whitespace cleaning and splitting on a piece of text.
}
\examples{
whitespace_tokenize(text = " some\ttext \n with whitespace ")
}
|
ef5e4245b831b1970d19295afd6de72fddf560ac
|
18beba89bd528840d3aab7a171fa671c5ac0cf3a
|
/man/MixtureModel_Biv.Rd
|
c199a85f9cf7c9e88fc47abcb4e565462ea59d19
|
[] |
no_license
|
mpru/BIMEGA
|
8d748401ad29f252c9c87b6ec04bca2d185d9a62
|
6b445dc7581a2b78aae559b34c382a2f74d1391f
|
refs/heads/master
| 2021-01-22T17:33:56.718268
| 2016-06-19T04:21:29
| 2016-06-19T04:21:29
| 61,449,411
| 0
| 0
| null | 2016-06-18T20:55:01
| 2016-06-18T19:34:43
|
R
|
UTF-8
|
R
| false
| true
| 1,672
|
rd
|
MixtureModel_Biv.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BIMEGA.R
\name{MixtureModel_Biv}
\alias{MixtureModel_Biv}
\title{The MixtureModel_Biv function}
\usage{
MixtureModel_Biv(METcancer, METnormal, MAcancer, MAnormal = NULL,
FunctionalGenes, NoNormalMode = FALSE)
}
\arguments{
\item{METcancer}{matrix with methylation data for cancer samples (genes in rows, samples in columns).}
\item{METnormal}{matrix with methylation data for normal samples (genes in rows, samples in columns).}
\item{MAcancer}{matrix with gene expression data for cancer samples (genes in rows, samples in columns).}
\item{MAnormal}{optional matrix with gene expression data for normal samples (genes in rows, samples in columns).}
\item{FunctionalGenes}{vector with genes names to be considered for the mixture models.}
\item{NoNormalMode}{logical, if TRUE no comparison to normal samples is performed. Defaults to FALSE.}
}
\value{
MethylationStates matrix of DM values, with driver genes in the rows and samples in the columns.
NrComponents matrix with the number of components identified for each driver gene.
Models list with the mixture model fitted for each driver gene.
MethylationDrivers character vector with the genes found by BIMEGA as differentially methylated and transcriptionally predictive (driver genes).
MixtureStates a list with a matrix for each driver gene containing the DM values.
Classifications a vector indicating to which component each sample was assigned.
}
\description{
Internal. Prepares all the structures to store the results and calls in a foreach loop a function that fits the mixture model in each gene.
}
\keyword{internal}
|
945a399319e5e1aa591a5f6a2686fa3b9b0d971c
|
2aa3b7455f3e17c8dbdc938f3386dae46a12bb99
|
/man/gs_auth.Rd
|
5862f43d1864f0cef6ddda30775cdc878f3a02e5
|
[
"MIT"
] |
permissive
|
dennistseng/googlesheets
|
5fd4d78994251df7703754375d5aab1a21686a7a
|
8471679c621c8b5d43679cf4b9598cba2f6a3560
|
refs/heads/master
| 2021-01-18T02:17:34.149067
| 2015-06-02T06:36:06
| 2015-06-02T06:36:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 965
|
rd
|
gs_auth.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/gs_auth.R
\name{gs_auth}
\alias{gs_auth}
\title{Authorize \code{googlesheets}}
\usage{
gs_auth(new_user = FALSE, token = NULL)
}
\arguments{
\item{new_user}{logical, defaults to \code{FALSE}. Set to \code{TRUE} if you
want to wipe the slate clean and re-authenticate with the same or different
Google account.}
\item{token}{path to a valid token; intended primarily for internal use}
}
\description{
Authorize \code{googlesheets} to access your Google user data. You will be
directed to a web browser, asked to sign in to your Google account, and to
grant \code{googlesheets} access to user data for Google Spreadsheets and
Google Drive. These user credentials are cached in a file named
\code{.httr-oauth} in the current working directory.
}
\details{
Based on
\href{https://github.com/hadley/httr/blob/master/demo/oauth2-google.r}{this
demo} from \code{\link[httr]{httr}}.
}
|
7c5621a106312ee7f303da14b4f1f1a51ca80b38
|
335919e0547cf90682f805c25bf242aa5f58a792
|
/R/app_ui.R
|
1ec44ccd18bd8c02d140ecc60d55541750b0b15d
|
[
"MIT"
] |
permissive
|
Gustavogaep/deminR
|
80e677b8fe955d3282e9aa1a9e218fd2489f1a6e
|
e0bf6656ce9c55a5c8379a6628f4e39437588a66
|
refs/heads/master
| 2022-04-24T15:16:59.471963
| 2020-04-08T13:52:22
| 2020-04-08T13:52:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,920
|
r
|
app_ui.R
|
#' @import shiny shinyMobile
#' @importFrom sever use_sever
#' @importFrom glouton use_glouton
app_ui <- function(request) {
  # Top-level UI of the deminR shinyMobile app: an f7 tabbed layout with
  # game / scores / chat tabs, side panels, and a sub-navbar. Module UIs
  # return lists, so individual elements are indexed with [[ ]] below.
  tagList(
    # Leave this function for adding external resources
    golem_add_external_resources(),
    # List the first level UI elements here
    f7Page(
      title = "deminR",
      icon = "www/icons/apple-touch-icon.png",
      favicon = "www/icons/favicon.png",
      manifest = "www/manifest.json",
      init = mod_init_ui("init"),
      f7TabLayout(
        # hide page content if not logged (see app_server.R)
        style = "visibility: hidden;",
        navbar = f7Navbar(
          title = "deminR",
          hairline = FALSE,
          shadow = TRUE,
          bigger = FALSE,
          transparent = TRUE,
          left_panel = TRUE,
          subNavbar = f7SubNavbar(
            mod_about_me_ui("about_me_ui_1")[[1]],
            mod_scores_ui("scores_ui_1")[[2]],
            mod_about_me_ui("about_me_ui_1")[c(2, 3)]
          )
        ),
        # Chat message bar only exists in the "database" flavour of the app.
        messagebar = if (golem::get_golem_options("usecase") == "database") {
          mod_chat_ui("chat_ui_1")[[2]]
        },
        panels = tagList(
          mod_about_ui("about_ui_1"),
          mod_help_ui("help_ui_1")[[1]]
        ),
        mod_share_ui("share_ui_1")[[1]],
        f7Tabs(
          id = "tabset",
          swipeable = TRUE,
          style = "toolbar",
          animated = FALSE,
          .items = tagList(
            mod_game_params_ui("game_params_ui_1"),
            mod_help_ui("help_ui_1")[[2]]
          ),
          f7Tab(
            tabName = "main",
            active = TRUE,
            icon = f7Icon("gamecontroller", old = FALSE),
            # main content
            mod_welcome_ui("welcome_ui_1"),
            mod_game_info_ui("game_info_ui_1"),
            mod_game_grid_ui("game_grid_ui_1"),
            mod_share_ui("share_ui_1")[[2]]
          ),
          f7Tab(
            tabName = "scores",
            icon = f7Icon("list_number", old = FALSE),
            mod_scores_ui("scores_ui_1")[[1]]
          ),
          # only display if database
          f7Tab(
            tabName = "chat",
            icon = f7Icon("chat_bubble_2", old = FALSE),
            mod_chat_ui("chat_ui_1")[[1]]
          )
        )
      )
    )
  )
}
#' @import shiny
golem_add_external_resources <- function(){
  # Serve the package's inst/app/www directory under the "www/" URL prefix
  # and inject the app's scripts/styles into the page <head>.
  addResourcePath(
    'www', system.file('app/www', package = 'deminR')
  )
  tags$head(
    golem::activate_js(),
    # Add here all the external resources
    # If you have a custom.css in the inst/app/www
    # Or for example, you can add shinyalert::useShinyalert() here
    tags$script(src = "www/js/loginInputBinding.js"),
    tags$link(rel = "stylesheet", type = "text/css", href = "www/css/colorThemeChooser.css"),
    shinyjs::inlineCSS(list(.darkleaflet = "background-color: #0000")),
    shinyjs::useShinyjs(),
    use_sever(),
    use_glouton()
  )
}
|
90c4812ec7d96a2a7a7df0ec38b81129c6a8f841
|
c1d36c065ee9ae8483cc853aa3f3f184a0ac9fd8
|
/File System in R.R
|
bd4f474ae9829e82c48fca348816ac1e17e874d4
|
[] |
no_license
|
JoshuaOluoch/File-manipulation-in-R
|
136a1350b0673d7d787e5324c20800b04369c222
|
b5223fe428e9ca5dde008d040f321b4464d2e38a
|
refs/heads/main
| 2023-02-26T21:10:45.306945
| 2021-02-05T09:21:06
| 2021-02-05T09:21:06
| 336,216,282
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,732
|
r
|
File System in R.R
|
# Tutorial script: file-system manipulation in base R (working directory,
# listing, reading/writing common formats, and file-management helpers).
# Paths below are machine-specific examples from the original author.
#Temporarily disable warnings
defaultW <- getOption("warn")
options(warn = -1)
# NOTE(review): the saved default is restored on the very next line, so
# warnings are effectively never suppressed afterwards - confirm intent.
options(warn = defaultW)
# File management in R
#Getting the working directory in R
getwd() # Gets the current working directory
#Setting the working directory (Make)
#Make sure you enter the correct filepath according to the os you are using
#Notice that the separator between folders is forward slash (/),
#as it is on Linux and Mac systems.
#If you use the Windows operating system, the forward slash will look odd,
#because you're familiar with the backslash () of Windows folders.
#When working in Windows, you need to either use the forward slash or escape your backslashes using a double backslash (\).
setwd("C:\\Users\\joshua.oluoch\\Documents\\Personal\\School\\Huru\\Assignment\\27th Jan 2021\\")
#or
setwd("C:/Users/joshua.oluoch/Documents/Personal/School/Huru/Assignment/27th Jan 2021/")
#List all files within a folder
list.files() # This list all files within the working directory
#You can also specify the path you want to list its contents eg
list.files("C:/Users/joshua.oluoch/Documents/Personal/School/Huru/Class/January/Lab4/")
#List all the folders within the directory
#Current directory
list.dirs() #List all the folders of the current directory
#Parent folder
list.dirs("../") #Lists all the folders of the parent folder
#To choose a file dynamically in R, use the command file.choose() as illustrated in the
#To read a file from the current working directory
#R can read different file types eg csv, text (.txt), excel (.xlsx), spss data, stata data etc
#Direct file extensions (.csv, .txt, .xls, .xlsx, .ods, .dta, .sav, .R, .py etc)
#Reading CSV file
csv_data = read.csv("3 Yr Audience Trend.csv") #read in the csv using relative path
csv_data = read.csv("C:/Users/joshua.oluoch/Documents/Personal/School/Huru/Assignment/27th Jan 2021/3 Yr Audience Trend.csv") #read in the csv using the absolute path
head(csv_data)
#Reading Excel file
library(openxlsx)
xlsx_data = read.xlsx("3 Yr Audience Trend.xlsx")
head(xlsx_data)
#Reading spss, stata & SAS data using foreign package.
#1. SPSS
library(foreign)
spss_data = read.spss("sleep.sav", use.value.label=TRUE, to.data.frame=TRUE)
head(spss_data)
#2.Stata
stata_data = read.dta("http://www.stata-press.com/data/r12/census2.dta")
head(stata_data)
################################################################################
########## Writing Files to Disk################################################
#1. output to csv
#Save to current directory
write.csv(csv_data, "csv_data_output.csv", row.names = FALSE) #write the dataframe to the current directory
#2. output to xlsx (use openxlsx package)
write.xlsx(xlsx_data, "xlsx_data_output.xlsx", row.names = FALSE)
#############################################################################
##########  Data types in R ############################################
str(stata_data)
xlist <- list(a = "Karthik Ram", b = 1:10, data = head(mtcars))
str(xlist)
##############################################################################
#### Base R file manipulation functions ######################################
#1. To check if a file exists
file.access("rscfp2019.dta") # 0 for success, -1 for failure
#2. append two files from R
file.append("3 Yr Audience Trend - Copy.csv","3 Yr Audience Trend.csv")
#3. Choose a file interactively
file.choose()
#4. Copy a file from one directory to another using file.copy() function
dir.create("tmp")
file.copy("sleep.sav", "tmp")
list.files("tmp")
unlink("tmp", recursive = TRUE)
#5. Creating and Removing a file
file.create(c("a.txt","b.txt"))
file.remove("a.txt", "b.txt")
#6. Renaming a file
file.create("a")
file.rename("a","a_1")
file.remove("a_1")
|
214cef97ed5deb16551b0a6ca00c10faedbf1ce0
|
43f634905b36ef46f5cef252a74b66f979f33361
|
/plot1.R
|
be898262626aa8a672f8966a82d80d5b9edc3b01
|
[] |
no_license
|
emiline002/ExData_Plotting1
|
c0a6f280e17387b067f908c1c549066ae65a756a
|
d8aef10c74907bffd63ee07164c1ea7e047e18fb
|
refs/heads/master
| 2021-01-18T18:18:18.211190
| 2015-09-12T19:32:02
| 2015-09-12T19:32:02
| 42,369,067
| 0
| 0
| null | 2015-09-12T18:55:16
| 2015-09-12T18:55:15
| null |
UTF-8
|
R
| false
| false
| 436
|
r
|
plot1.R
|
# Plot 1: histogram of Global Active Power on 2007-02-01 and 2007-02-02 from
# the UCI household power consumption dataset.
# Fixes: spell out `header =` (was the partially-matched `head =`); drop the
# redundant second as.Date() on an already-Date column; convert measurement
# columns with lapply() instead of apply(), which would coerce the whole data
# frame to a character matrix.
power <- read.table("household_power_consumption.txt", header = TRUE,
                    sep = ";", stringsAsFactors = FALSE)
power$Date <- as.Date(power$Date, "%d/%m/%Y")
# Keep only the two days of interest.
sub_power <- power[power$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]
# Columns 3:9 hold the numeric measurements (read as character above).
sub_power[, 3:9] <- lapply(sub_power[, 3:9], as.numeric)
hist(sub_power[, 3], xlab = "Global Active Power (kilowatts)", col = "red",
     main = "Global Active Power")
|
e3c334632f4373383267074f47b84a9a697cd9e2
|
f5d5fa2458aa0ad1f247a1c3cd3d00dcc008686a
|
/Examples/R/ChemoTest.R
|
085df8a08a74d6a4deb3d6b10752d6754e7af3e0
|
[] |
no_license
|
MatJim-Ottawa/CollocInfer
|
cafcae96850271176084973f218063d1b374a9f3
|
b5fe8102dee0301beaa5b4dbdecd79460c95d6f8
|
refs/heads/master
| 2021-01-23T16:36:18.710179
| 2014-11-07T14:44:12
| 2014-11-07T14:44:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,943
|
r
|
ChemoTest.R
|
sourceDir <- function(path, trace = TRUE) {
  # Source every R/S/Q script found directly under `path`, optionally echoing
  # each file name as it is loaded.
  script_names <- list.files(path, pattern = "\\.[RrSsQq]$")
  for (script in script_names) {
    if (trace) {
      cat(script, ":")
    }
    source(file.path(path, script))
    if (trace) {
      cat("\n")
    }
  }
}
# Vector field for a predator-prey model (appears to be a chemostat-style
# system - confirm): two prey clones C1/C2 sharing carrying capacities,
# grazed by predator B through a shared saturating functional response with
# preference weight p['p'] on C1. Parameters arrive on the log scale (hence
# exp(p)); `x` is a matrix with named columns C1, C2, B; `more` is unused but
# required by the CollocInfer fn signature.
RosMac = function(t, x, p, more){
  p = exp(p)
  dx = x
  # C1: logistic-type growth minus preference-weighted grazing.
  dx[,'C1'] = p['r1']*x[,'C1']*(1- x[,'C1']/p['Kc1']- x[,'C2']/p['Kc2']) - p['p']*p['G']*x[,'C1']*x[,'B']/(p['KB']+p['p']*x[,'C1']+x[,'C2'])
  # C2: same structure without the preference factor.
  dx[,'C2'] = p['r2']*x[,'C2']*(1- x[,'C1']/p['Kc1']- x[,'C2']/p['Kc2']) - p['G']*x[,'C2']*x[,'B']/(p['KB']+p['p']*x[,'C1']+x[,'C2'])
  # B: conversion efficiency chiB times total grazing, minus loss rate delta.
  dx[,'B'] = p['chiB']*p['G']*(p['p']*x[,'C1']+x[,'C2'])*x[,'B']/(p['KB']+p['p']*x[,'C1']+x[,'C2']) - p['delta']*x[,'B']
  return(dx)
}
# Same vector field as RosMac() but in deSolve::lsoda form: states `z` are on
# the log scale, and the returned derivative is d(log x) = dx / x, wrapped in
# a list as lsoda requires. `p` is on the log scale.
RosMacODE = function(t,z,p){
  p = exp(p)
  x = exp(z)
  dx = x
  dx['C1'] = p['r1']*x['C1']*(1- x['C1']/p['Kc1']-x['C2']/p['Kc2']) - p['p']*p['G']*x['C1']*x['B']/(p['KB']+p['p']*x['C1']+x['C2'])
  dx['C2'] = p['r2']*x['C2']*(1- x['C2']/p['Kc2']- x['C1']/p['Kc1']) - p['G']*x['C2']*x['B']/(p['KB']+p['p']*x['C1']+x['C2'])
  dx['B'] = p['chiB']*p['G']*(p['p']*x['C1']+x['C2'])*x['B']/(p['KB']+p['p']*x['C1']+x['C2']) - p['delta']*x['B']
  return(list(dx/x))
}
# Observation function: states are stored on the log scale; the observables
# are log(total prey C1 + C2) and log(predator B).
RMobsfn <- function(t,x,p,more) {
  states <- exp(x)
  observed <- cbind(states[, "C1"] + states[, "C2"], states[, "B"])
  log(observed)
}
# Model parameters (natural scale) and their names; the fitting routines work
# with log-parameters throughout.
RMpars = c(0.2,0.025,0.125,2.2e4,1e5,5e6,1,1e9,0.3)
RMParnames = c('p','r1','r2','Kc1','Kc2','G','chiB','KB','delta')
names(RMpars)= RMParnames
logpars = log(RMpars)
RMVarnames = c('C1','C2','B')
x0 = c(50,50,2)
names(x0) = RMVarnames
time = 0:200
#res0 = lsoda(log(x0),time,RosMacODE,p = logpars)
#data = res0[,2:4] + 0.2*matrix(rnorm(603),201,3)
# B-spline basis with knots at every time point for the collocation fit.
rr = range(time)
knots = seq(rr[1],rr[2],by=1)
bbasis = create.bspline.basis(rr,norder=4,breaks=knots)
# Pre-generated noisy data (machine-specific path from the original author).
data = as.matrix(read.csv(file = "E:\\data\\ChemoExampleDataR.csv",header = TRUE, sep = " "))
names(data) = RMVarnames
# Initial coefficients from a penalised smooth of the data.
coef0 = smooth.basis(time,data,fdPar(bbasis,int2Lfd(2),10))$fd$coef
colnames(coef0) = RMVarnames
out = LS.setup(pars=logpars,coefs=coef0,basisvals=bbasis,fn=RosMac,lambda=1e5, times=time,posproc=TRUE)
lik = out$lik
# NOTE(review): "pro" is likely a typo for "proc" (cf. out$proc below);
# partial $-matching makes it work anyway - confirm.
proc = out$pro
res1 = ParsMatchOpt(logpars,coef0,proc)
res3 = outeropt(data,time,res1$pars,coef0,lik,proc)
# Second fit: only the totals C1+C2 and B are observed, via RMobsfn.
data2 = cbind( log( exp(data[,'C1'])+exp(data[,'C2'])), data[,'B'])
out = LS.setup(pars=logpars,coefs=coef0,basisvals=bbasis,fn=RosMac,lambda=1e5, times=time,posproc=TRUE,likfn=RMobsfn)
lik2 = out$lik
proc2 = out$proc
# Zero out the C1/C2 coefficients and re-estimate them from the totals.
coef02 = coef0
coef02[,1:2] = 0
Fres3 = FitMatchOpt(coef02,1:2,res1$pars,proc2)
res32 = outeropt(data2,time,res1$pars,Fres3$coefs,lik2,proc2)
write.table(res1$pars,file = "..\\..\\data\\res1pars.csv",col.names=FALSE,row.names=FALSE)
write.table(Fres3$coefs,file = "..\\..\\data\\Fres3pars.csv",col.names=FALSE,row.names=FALSE)
#
# > exp(res32$pars)
#            p           r1           r2          Kc1          Kc2            G         chiB           KB        delta
# 2.259798e-01 2.925285e-02 1.295061e-01 5.531922e+08 4.778160e+03 5.048355e+06 1.028451e+00 9.684279e+08 3.063652e-01
|
acb03b6b2b27943b4d88505c40de4a96cc3dd5a6
|
c71e0c09a166cee1fc3f408c7c18e315496f0199
|
/R/snaker.R
|
02b477b7127b9f2b9aa4544da620745674d44b0f
|
[] |
no_license
|
fmarotta/snaker
|
765a5a3b2053ae0f4da5abe7c4cd93386f8aaae5
|
061b5e61adf5c9cd09a071d6fbbf80e803b3e7cf
|
refs/heads/master
| 2020-09-30T18:18:04.939139
| 2019-12-17T12:06:15
| 2019-12-17T12:06:15
| 227,345,997
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,846
|
r
|
snaker.R
|
#' snaker
#'
#' This function is equivalent to docopt::docopt, except that it can also handle
#' arguments passed by Snakemake. If the `snakemake' object exists, then the
#' arguments are taken from there, otherwise from the command line.
#'
#' @inheritParams docopt::docopt
#'
#' @return The list of named arguments and parameters.
#' @export
snaker <- function(doc, name = NULL, help = TRUE, version = NULL) {
  # Pick up the `snakemake` S4 object that Snakemake injects into the global
  # environment of an R script rule (NULL when run from the command line).
  if (exists("snakemake", where = .GlobalEnv, inherits = FALSE))
    snakemake <- get("snakemake", envir = .GlobalEnv)
  else
    snakemake <- NULL
  # Join usage-string continuation lines (a newline followed by >= 2 blanks)
  # into single lines so docopt parses them as one entry.
  # Fix: POSIX character classes must be written [[:blank:]]; the previous
  # pattern [:blank:] matched the literal characters ":", "b", "l", "a", "n", "k".
  doc <- gsub(pattern = "\n[[:blank:]]{2,}", replacement = " ", x = doc)
  # If there exists the snakemake object and it is of the right class,
  # take the arguments from there. (inherits() replaces class(x) == "...".)
  if (inherits(snakemake, "Snakemake")) {
    # Parse the commands: the leading unnamed entries of `params` are the
    # positional docopt commands.
    snake_slot <- snakemake@params[names(snakemake@params) == ""]
    slot_names <- names(snakemake@params)[names(snakemake@params) != ""]
    commands <- snake_slot[seq_len(length(snake_slot) - length(slot_names))]
    options_fields <- c("input", "output", "params", "resources")
    args_fields <- c("input", "output")
    options <- list()
    arguments <- list()
    # Parse the log slot: multiple logs behave like a regular named field; a
    # single unnamed log becomes a --log=<path> option directly.
    if (length(snakemake@log) > 1) {
      options_fields <- c(options_fields, "log")
      args_fields <- c(args_fields, "log")
    } else if (length(snakemake@log) == 1) {
      if (is.null(names(snakemake@log))) {
        options <- append(options, paste("--log", snakemake@log, sep = "="))
      } else {
        # Fix: this previously read `options_fields < c(...)` - a comparison
        # whose result was discarded - so a single named log was ignored.
        options_fields <- c(options_fields, "log")
      }
    }
    # Parse the options: named entries of each field become --name=value.
    # Fix: iterate over options_fields (which may now include "log") instead
    # of re-hardcoding the original four field names.
    for (s in options_fields) {
      snake_slot <- methods::slot(snakemake, s)
      slot_names <- names(snake_slot)[names(snake_slot) != ""]
      if (length(slot_names)) {
        # Short (-x) vs long (--xyz) prefix depends on the length of the
        # option *name* (the previous code tested the value's length).
        dash <- ifelse(nchar(slot_names) == 1, "-", "--")
        options <- append(options, paste(paste0(dash, slot_names), snake_slot[slot_names], sep = "="))
      }
    }
    # Parse the arguments: drop named entries, keep the unnamed positionals.
    for (s in args_fields) {
      snake_slot <- methods::slot(snakemake, s)
      slot_names <- names(snake_slot)[names(snake_slot) != ""]
      snake_slot[slot_names] <- NULL
      # NOTE(review): this trailing removal uses the length *after* the named
      # entries were deleted, and the upper bound exceeds the list length;
      # kept as-is pending confirmation of the intended slicing.
      snake_slot[(length(snake_slot) - length(slot_names) + 1):(length(snake_slot) + 1)] <- NULL
      arguments <- append(arguments, snake_slot)
    }
    snargs <- c(unlist(commands), unlist(options), unlist(arguments))
  } else {
    # No Snakemake context: fall back to the script's command-line arguments.
    snargs <- commandArgs(TRUE)
  }
  argv <- docopt::docopt(doc, args = snargs, name = name, help = help, version = version,
                         strict = FALSE, strip_names = TRUE, quoted_args = TRUE)
  argv
}
|
d8e9108ed2defec4e46864369802b23ccf9fde4a
|
4a3cf8e6e74db99a5a7d0f56757b8e3359bcd30d
|
/man/make_sl_task_list.Rd
|
31043eb5a2718a88dd52045c5170ecbb41a62b92
|
[
"MIT"
] |
permissive
|
bdwilliamson/cvma
|
1ae9710b4b57e8ef1dd0168fd84d75c324ef1454
|
6651ea9fece2a267261c454dadebf1d41e54aaaf
|
refs/heads/master
| 2021-10-25T09:40:01.855362
| 2019-04-03T15:02:00
| 2019-04-03T15:02:00
| 112,544,035
| 1
| 1
| null | 2017-11-30T00:32:40
| 2017-11-30T00:32:39
| null |
UTF-8
|
R
| false
| true
| 385
|
rd
|
make_sl_task_list.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/task_lists.R
\name{make_sl_task_list}
\alias{make_sl_task_list}
\title{Helper function to make a task list for computing super learners.}
\usage{
make_sl_task_list(Ynames, V, fold_fits = c(V - 1, V - 2))
}
\description{
Helper function to make a task list for computing super learners.
}
\keyword{internal}
|
81758b5e4dfaa4fbe3d0c5290f06366e3da2e37f
|
4052545c292db46b6363f299828dfc1d8d7f8b9a
|
/shiny2/int/input1c1.R
|
6bf774048cd00cf8a841ce4f0e4062c4cb689e09
|
[] |
no_license
|
uvesco/studioEpi
|
972324a5af179912d1ab2c60ea21f0f04cddfa33
|
ed0849185460d628aa1fdc73eb13e67555a87a75
|
refs/heads/master
| 2020-12-12T05:03:04.953612
| 2020-04-04T23:26:09
| 2020-04-04T23:26:09
| 234,048,695
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29
|
r
|
input1c1.R
|
# Shiny server fragment: registers a renderUI for output$input1.
# NOTE(review): renderUI() is called without an expression - presumably a
# placeholder to be filled in; confirm against the including server file.
output$input1 <- renderUI()
|
28ab5e6765e9defcd82ba29ffa96335f31b1b2d0
|
d630dfeae8965eddd8e2f1c0917aa06719c1de9e
|
/scripts/createOutputTestis.R
|
8693d6453ec15dd83178a86bddcbc48914914ded
|
[] |
no_license
|
AEBilgrau/effadj
|
5b67275ab22880fdc293d9143d1b888cdcc0a900
|
e0ca987d11502a23229b9731d3a712eda52b2a45
|
refs/heads/master
| 2020-05-20T19:29:25.136211
| 2016-02-23T21:41:26
| 2016-02-23T21:41:26
| 37,054,970
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,108
|
r
|
createOutputTestis.R
|
################################################################################
# Analysis of testis data #
# Written by: #
# Anders Ellern Bilgrau, Steffen Falgreen, and Martin Boegsted #
# Last revision: 11th of June, 2015 #
################################################################################
#
# Plot raw data
#
# Lattice panel for the standard-dilution panels (fig. 2B): draws the points,
# adds a dashed least-squares line annotated with its equation (Cq vs dilution
# step k), and prints the slope's standard error inside the panel.
mypanel <- function(x, y, ...) {
  panel.lmlineq(x, y, adj = c(1,0), lty = 2, col.line = "darkgrey", digits = 2,
                at = 0.5, rot = TRUE, cex = 0.7, style = 2, pos = 1,
                varNames = alist(y = C[q], x = k), offset = 1)
  panel.xyplot(x, y, ...)
  aa <- lm(y ~ x)
  # Standard error of the slope coefficient; fixed text position (2, 40)
  # assumes the panels' data ranges - confirm if axis limits change.
  panel.text(2, 40, labels = sprintf("se = %.3f", sqrt(vcov(aa)[2,2])),
             cex = 0.7)
}
# Trellis plot of all data
# Panel A: dotplot of Cq per sample; Panel B: standard-dilution series per
# gene (lattice graphics, written side by side to ../output/fig2.eps).
red.colours <- c("#F82A1C", "#F59691", "#DF6F38", "#AA5455")
stopifnot(nlevels(testis$geneName) == length(red.colours)) # one colour per gene
testis.data <- subset(testis, sampleType != "Standard")
fig2a <-
  dotplot(sampleName ~ Cq | geneType:sampleType,
          groups = geneName,
          col = red.colours,
          data = testis.data,
          main = "",
          xlab = expression(C[q]),
          pch = seq_along(red.colours),
          key = list(text = list(title = "A"),
                     corner = c(-0.1,1.1),
                     cex = 1.5, font = "bold"))
testis.std <- subset(testis, sampleType == "Standard")
fig2b <-
  xyplot(Cq ~ l2con | geneName,
         groups = geneName,
         col = red.colours,
         data = testis.std,
         panel = mypanel,
         xlab = "Dilution step",#as.expression(bquote(-log[2]*N["0,i,j,k"])),
         ylab = expression(C[q]),
         main = "",
         pch = seq_along(red.colours),
         key = list(text = list(title = "B"),
                    corner = c(-0.1,1.1),
                    cex = 1.5, font = "bold"))
# Write both panels into a single EPS figure (A left half, B right half).
setEPS()
postscript("../output/fig2.eps", width = 1.5*7, height = 0.5*7, fonts = "serif")
trellis.par.set(strip.background = list(col = "lightgrey"))
print(fig2a, position = c(0, 0, 0.5, 1), more = TRUE)
print(fig2b, position = c(0.5, 0, 1, 1))
dev.off()
rm(testis.data, testis.std)
#
# Analysis
#
#
# We wish to test
#    mir127 vs rnu6b, mir127 vs rnu24, mir127 vs rnu6b + rnu24 (omitted)
#    mir143 vs rnu6b, mir143 vs rnu24, mir143 vs rnu6b + rnu24 (omitted)
#
# Each entry pairs a target miRNA with a reference gene.
grps.list <- list(c("mir127", "rnu6b"),
                  c("mir127", "rnu24"),
                  c("mir143", "rnu6b"),
                  c("mir143", "rnu24"))
# Do bootstrap. Results are cached in `testis.boot` unless `recompute` is
# TRUE; `n.boots` and `save.file` are assumed to be defined upstream - confirm.
if (!exists("testis.boot") || recompute) {
  message("Testis boostrap")
  testis.boot <- list()
  for (i in 1:length(grps.list)) {
    # Subset data
    testis.tmp <- as.data.qPCR(subset(testis, geneName %in% grps.list[[i]]))
    # Compute bootstrap estimate and results
    testis.boot[[i]] <- bootstrapEstimate(testis.tmp, n.boots = n.boots,
                                          weighted = TRUE, alpha = 0.05)
    message(sprintf("i = %d", i))
  }
  resave(testis.boot, file = save.file)
}
# Combine results
# For each gene pair: print the fit summary (redirected to a text file), draw
# model diagnostics (to a multi-page PDF), and collect the DDCq estimates of
# each method into `toTeX` for the LaTeX table below.
sink("../output/fit_summary_testis.txt")
pdf("../output/modelchecks_testis.pdf", onefile = TRUE)
toTeX <- NULL
we <- TRUE # weighted fits throughout
for (i in 1:length(grps.list)) {
  # Subset data
  testis.tmp <- subset(testis, geneName %in% grps.list[[i]])
  # Print fit information
  m <- paste(grps.list[[i]], collapse = " vs. ")
  cat("\n\n\n\n\n===", m, "===\n")
  print(summary(fit <- qPCRfit(as.data.qPCR(testis.tmp), weighted = we)))
  print(plot(fit, col = testis.tmp$sampleName, pch = testis.tmp$sampleType,
             main = m))
  # Create results for table: one row per estimation method.
  results <- rbind(
    "t-test"    = DDCq.test(testis.tmp, method = "N"),
    "LMEM"      = DDCq.test(testis.tmp, method = "LMM",
                            eff.cor = F, weighted = we),
    "EC"        = DDCq.test(testis.tmp, method = "LMM",
                            eff.cor = T, var.adj = F, weighted = we),
    "ECVA1"     = DDCq.test(testis.tmp, method = "LMM",
                            eff.cor = T, var.adj = T, weighted = we),
    "ECVA2"     = DDCq.test(testis.tmp, method = "LMM",
                            eff.cor = T, var.adj = T,
                            var.type = "montecarlo", weighted = we),
    "Bootstrap" = testis.boot[[i]]
  )
  toTeX <- rbind(toTeX, results)
}
dev.off()
sink()
#
# Writing LaTeX table
#
# Round, format the p-value column in scientific notation, and LaTeX-escape
# the row/column labels before writing Table 2 with Hmisc::latex().
toTeX <- signif(toTeX, 4)
toTeX[, 5] <- sn(toTeX[, 5])
colnames(toTeX) <- gsub("Pr(>|t|)", "$p$-value", colnames(toTeX), fixed = TRUE)
colnames(toTeX) <- gsub("t ", "$t$-", colnames(toTeX), fixed = TRUE)
rownames(toTeX) <- gsub("LMEM", "LMM", rownames(toTeX))
rownames(toTeX) <- gsub("t.", "$t$-", rownames(toTeX), fixed = TRUE)
rownames(toTeX) <- gsub("ECVA", "EC\\\\&VA", rownames(toTeX))
# Row-group headers, one per compared gene pair.
grps <-
  sapply(grps.list, function(x) ifelse(length(x) == 3,
                                       paste(x[1], "vs", x[2], "+", x[3]),
                                       paste(x[1], "vs", x[2])))
caption.txt <- "
DLBCL data: Method comparison for estimating the $\\ddcq$-value.
EC: Efficiency corrected LMM estimate ignoring the uncertainty of the
efficiency estimates.
EC\\&VA1: EC and variance adjusted LMM estimate using the delta method.
EC\\&VA2: EC and variance adjusted LMM estimate using Monte Carlo integration.
Bootstrap: Estimate by the bootstrap described in Section \\ref{sec:bootstrap}
fitting the LMM and using the EC estimate.
Bootstrap shows the mean and standard deviation of %d bootstrap samples using
the EC estimate.
The last two columns show the $95%s$ lower and upper confidence interval
limits.
"
# Drop the naive LMM and t-test rows from the published table.
toTeX <- toTeX[!grepl("LMM|t-test", rownames(toTeX)), ]
# NOTE(review): label "table:tesits" looks like a typo for "table:testis" -
# confirm against the manuscript before changing, as \ref{}s depend on it.
w <- latex(toTeX,
           file = "../output/Table2.tex",
           title = "",
           label = "table:tesits",
           caption = sprintf(caption.txt, length(testis.boot), "\\%"),
           caption.loc = "top",
           rgroup = grps,
           center = "center",
           numeric.dollar = TRUE,
           keep.tex = TRUE,
           size = "small")
|
17a415fdc8f04a2450f3aabdd6137f1fdfde490e
|
5c7e7dce5d0b75b2299f0710393ecf29e768e342
|
/man/data_ziplist.Rd
|
f15c1e7c852f0825aaaec919ccad4fc069e71d18
|
[] |
no_license
|
SebEagle/snowcoveR
|
995c860ec05fe456b6c8914c48f37af532f50316
|
39d21758976bb697068c84e64aad84b86fddc05d
|
refs/heads/master
| 2020-03-09T08:22:35.168913
| 2018-05-25T21:29:21
| 2018-05-25T21:29:21
| 128,687,597
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 440
|
rd
|
data_ziplist.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_ziplist.R
\name{data_ziplist}
\alias{data_ziplist}
\title{data_ziplist}
\usage{
data_ziplist(input_dir, input_file)
}
\arguments{
\item{input_dir}{Folder directory to zip file}
\item{input_file}{Name of the zip file.}
}
\description{
Returns a list of all the files a zip file contains.
}
\author{
Sebastian Buchelt (M.Sc. EAGLE - University of Wuerzburg)
}
|
4c30e63f6fa3555b23e0cb83d0628cd1160fec65
|
0476f2bd245afe4b630aeab628499df2d91517db
|
/man/GenerateMetaboliteSQLiteDB.Rd
|
170d48f38fead7a397be90386e34855665696370
|
[] |
no_license
|
cran/InterpretMSSpectrum
|
d07f32034e3f68ab719c6827a4b1529f8d7fb503
|
ecf9604cfde5dd22a057b17ad2272cde7351157d
|
refs/heads/master
| 2023-07-24T03:18:25.154905
| 2023-07-07T14:00:02
| 2023-07-07T14:00:02
| 67,487,289
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,675
|
rd
|
GenerateMetaboliteSQLiteDB.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GenerateMetaboliteSQLiteDB.R
\name{GenerateMetaboliteSQLiteDB}
\alias{GenerateMetaboliteSQLiteDB}
\title{GenerateMetaboliteSQLiteDB.}
\usage{
GenerateMetaboliteSQLiteDB(
dbfile = "SQLite_APCI.db",
ionization = c("APCI", "ESI")[1],
mass_range = c(100, 105),
ncores = 1,
silent = TRUE
)
}
\arguments{
\item{dbfile}{Path and file name of the final SQLiteDB or NULL to return the data frame.}
\item{ionization}{Has to be specified to account for different plausibility rules and
elemental composition.}
\item{mass_range}{For testing use default range, otherwise use your measurement range.}
\item{ncores}{Number of cores. Use as many as possible.}
\item{silent}{Set to FALSE to get progress messages.}
}
\value{
Returns the resulting data frame invisible. Will write an SQL_DB if 'dbfile'
provides a valid path and file name.
}
\description{
\code{GenerateMetaboliteSQLiteDB} will set up a SQLite data base containing
potential metabolite formulas, their masses and isotopic distribution for use with
\link{InterpretMSSpectrum}.
}
\details{
The process takes a long time for larger masses (>400 Da). Parallel processing
with 8 cores is highly recommended. Alternatively pre-processed versions can be downloaded
on request to \email{jan.lisec@bam.de}. To process a 1 Da range (from 900 to 901) for
ESI does take approximately 5 minutes on 8 cores.
}
\examples{
# using the default values will compute be relatively fast, but for higher masses it
# is getting much slower
db <- GenerateMetaboliteSQLiteDB(dbfile = NULL)
}
|
2a6a9b0675d42c37150c19666a5dbcdcd487b1a4
|
f3e914e8a3ccb1c4d73555321e3eaf52b59f52e0
|
/R/3.3.R
|
af3d975b9af9072f261593027fd74c6ffb90d42b
|
[] |
no_license
|
youjia36313/learn_R
|
08be35ebc032839e8c25466c63ae5a0292069855
|
674de3d09e0e7dfec2d3e164ffab98e0c40ca597
|
refs/heads/master
| 2020-09-15T19:39:00.136679
| 2019-11-23T06:37:41
| 2019-11-23T06:37:41
| 223,541,846
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 133
|
r
|
3.3.R
|
# Scatter plot of weight against height from HeightWeightData.csv
# (assumes the CSV sits in the current working directory).
df_01 <- read.csv('HeightWeightData.csv')
df_01
#getwd()
# pch = 2 draws open triangles; points in blue with labelled axes.
plot(df_01$height,df_01$weight,pch=2,col="blue",xlab="height",ylab="weight")
|
f5f488540d524f895c2132dbe105c55b978e49c1
|
3cc2e5d0f3c74dee646346bf499a6bc3b97a8266
|
/HW1_Creditcard_SVM.R
|
ff9079a598c9c7750d6b025faad77930ea4fcc35
|
[] |
no_license
|
aten2001/Analytical-Models-Assignments
|
d636a95f816d04a793b573e1e52aef785c18f12a
|
6d4ccf4decd1e5895badfb636f3dc813c33fed71
|
refs/heads/master
| 2021-06-20T11:03:09.363546
| 2017-07-05T23:05:05
| 2017-07-05T23:05:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,588
|
r
|
HW1_Creditcard_SVM.R
|
################################# Start of Code =====================================
# Credit-card approval classification with a support-vector machine (kernlab).
# Reads credit_card_data.csv, recodes the "t"/"f" column to 0/1, converts the
# binary columns to factors, makes a stratified 80/20 split with caret, fits a
# C-svc ksvm, and reports test-set accuracy plus the separator's coefficients.

# BUG FIX: rm(ls()) is an error — rm() takes a character vector via `list =`.
rm(list = ls())
getwd()
setwd("G:/Georgia Tech/Analytical Models/Assignments")

# Install only what is missing instead of reinstalling on every run.
for (pkg in c("data.table", "kernlab", "caret")) {
  if (!requireNamespace(pkg, quietly = TRUE)) install.packages(pkg)
}
library(data.table)

# fread is quicker than read.table and read_table in the readr package
cred_data <- fread("credit_card_data.csv")
View(cred_data)

# Rename the response variable (11th column).
cred_names <- colnames(cred_data)
cred_names[11] <- "Response"
names(cred_data) <- cred_names

# Exploring the data
summary(cred_data)
str(cred_data)
unique(cred_data$V5)

# V1, V5, V6, V8 are binary; V5 is coded "t"/"f", so recode it to 1/0
# (no information is lost) and coerce the character column to numeric.
cred_data$V5[cred_data$V5 == "t"] <- "1"
cred_data$V5[cred_data$V5 == "f"] <- "0"
class(cred_data$V5)
cred_data$V5 <- as.numeric(cred_data$V5)

# Pairwise correlations — possible here because every column is numeric.
# Correlations between predictors are low, and with the response too (except V5).
corr_table <- as.data.frame(cor(cred_data))
View(corr_table)
# NOTE(review): standardizing predictors was considered and rejected — the
# train-set mean/sd transformation would have to be replayed on new data.

# Convert the binary columns (and the response) to factors for classification.
cred_data$V1 <- as.factor(cred_data$V1)
cred_data$V5 <- as.factor(cred_data$V5)
cred_data$V6 <- as.factor(cred_data$V6)
cred_data$V8 <- as.factor(cred_data$V8)
cred_data$Response <- as.factor(cred_data$Response)

# Stratified train/test split (80/20) on the response.
# library(), not require(): require() returns FALSE instead of erroring.
library(caret)
set.seed(1123)
split_index <- createDataPartition(cred_data$Response, p = 0.8, list = FALSE, times = 1)
cred_train <- cred_data[split_index, ]
cred_test <- cred_data[-split_index, ]

# BUG FIX: Response is a factor now, so mean(Response) is NA with a warning.
# Compare the proportion of positives instead to verify the split is balanced.
mean(cred_data$Response == "1")
mean(cred_train$Response == "1")
mean(cred_test$Response == "1")

# Building the model
library(kernlab)
library(ggplot2)
# Quick look for a 2-D separator; reference columns bare inside aes()
# (df$col inside aes() is an anti-pattern). No clear 2-D separation appears,
# so all predictors are used.
ggplot(cred_data, aes(x = V2, y = V3, color = Response)) +
  geom_point()
ggplot(cred_data, aes(x = V2, y = V4, color = Response)) +
  geom_point() +
  geom_smooth()

# C-classification SVM with the default kernel.
# C = 10 gave the best test accuracy among C in 1..1e5 for this seed.
svmodel <- ksvm(Response ~ ., data = cred_train, C = 10, type = "C-svc")
attributes(svmodel)
svmodel
summary(svmodel)
str(svmodel)

# Predictions and confusion matrix on the held-out data.
predict_response <- predict(svmodel, newdata = cred_test)
table(cred_test$Response, predict_response)
# ACCURACY = (True Positives + True Negatives) / Total Observations
# ~90.7% with this seed and C.

# Intercept of the separating hyperplane (kernlab's b() accessor).
b0 <- b(svmodel)
b0
# Coefficients of the separator.
# NOTE(review): cred_train still contains factor columns here, so this
# element-wise product may fail or mismatch ksvm's internal encoding —
# confirm against a purely numeric predictor matrix.
w <- colSums(coef(svmodel)[[1]] * cred_train[unlist(alphaindex(svmodel)), ])
w
|
fdf6c4b6bce4c7beefcdbd8436a170ee98f03c9c
|
b1cccc43340f5e1100a95428ecfe6a14fadb215a
|
/man/logging_info.Rd
|
5ab927bea45abb0d8aa75711a337bcca33551a62
|
[
"MIT"
] |
permissive
|
MRCIEU/ieugwasr
|
b041818e3de4db287aed0667c5e167ac0bdf74f3
|
33e4629f4dacd635c68e690bb5648de529c333cc
|
refs/heads/master
| 2022-07-01T22:52:46.713761
| 2022-06-15T14:27:21
| 2022-06-15T14:27:21
| 214,448,457
| 35
| 17
|
NOASSERTION
| 2022-03-16T14:54:21
| 2019-10-11T13:50:01
|
R
|
UTF-8
|
R
| false
| true
| 258
|
rd
|
logging_info.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/api.R
\name{logging_info}
\alias{logging_info}
\title{Details of how access token logs are used}
\usage{
logging_info()
}
\description{
Details of how access token logs are used
}
|
e8ccc95be825fb955d6bbea34aa98ed8640739a6
|
30a1398bc0ff11036867015db2a8cec1cf73b953
|
/code_simulation/plot_one_case.R
|
208de7fd60bd926bc56b75a282d211ccba869cf8
|
[
"MIT"
] |
permissive
|
wilsoncai1992/MOSS-simulation
|
d6a34506c3f5dd76797420926ff8f3dced8015e8
|
8b927ef14fa981bd8ab530da165ebb7ef1dd24f3
|
refs/heads/master
| 2021-03-22T01:11:52.358060
| 2019-06-22T07:03:55
| 2019-06-22T07:03:55
| 123,011,317
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,075
|
r
|
plot_one_case.R
|
library(survival)
library(MOSS)
library(survtmle)
library(tidyverse)
library(ggplot2)
library(ggpubr)
source("../fit_survtmle.R")
# simulate data
source("./simulate_data.R")
# Run one Monte Carlo replicate: simulate survival data of size `n_sim`, then
# estimate the treated/control survival curves with Kaplan-Meier, a super
# learner, IPCW, EE, MOSS (classic and l1/l2 submodels) and survtmle.
#
# Returns a long data frame of (t, s, method) survival curves — but only when
# both the iterative TMLE and the EE curves are non-monotone (the case this
# plotting script wants to illustrate); otherwise returns NULL.
# Depends on simulate_data() from simulate_data.R and fit_survtmle() from
# ../fit_survtmle.R, plus the MOSS / survtmle packages.
do_once <- function(n_sim = 2e2) {
  simulated <- simulate_data(n_sim = n_sim)
  df <- simulated$dat
  true_surv <- simulated$true_surv1
  # Super-learner libraries for treatment, censoring and failure mechanisms.
  sl_lib_g <- c("SL.mean", "SL.glm", "SL.gam", "SL.earth")
  sl_lib_censor <- c("SL.mean", "SL.glm", "SL.gam", "SL.earth")
  sl_lib_failure <- c("SL.mean", "SL.glm", "SL.gam", "SL.earth")
  range(df$T.tilde)
  # Shift event times to start at 1 so they can index the time grid directly.
  df$T.tilde <- df$T.tilde + 1
  k_grid <- 1:max(df$T.tilde)
  message("KM")
  n_sample <- nrow(df)
  # Stratified Kaplan-Meier per treatment arm.
  km_fit <- survfit(Surv(time = T.tilde, event = Delta) ~ A, data = df)
  surv1_km <- tail(km_fit$surv, km_fit$strata["A=1"])
  time1_km <- tail(km_fit$time, km_fit$strata["A=1"])
  surv0_km <- tail(km_fit$surv, km_fit$strata["A=0"])
  time0_km <- tail(km_fit$time, km_fit$strata["A=0"])
  library(zoo)
  # Expand a KM step function onto the full 1..max(T.tilde) grid:
  # carry the last observed value forward; before the first event S(t) = 1.
  impute_KM <- function(time, km) {
    surv1_km_final <- rep(NA, max(df$T.tilde))
    surv1_km_final[time] <- km
    surv1_km_final <- na.locf(surv1_km_final, na.rm = FALSE)
    surv1_km_final[is.na(surv1_km_final)] <- 1
    return(surv1_km_final)
  }
  surv1_km_final <- impute_KM(time = time1_km, km = surv1_km)
  surv0_km_final <- impute_KM(time = time0_km, km = surv0_km)
  km_fit_1 <- survival_curve$new(t = k_grid, survival = surv1_km_final)
  km_fit_0 <- survival_curve$new(t = k_grid, survival = surv0_km_final)
  message("SL")
  W_names <- c("W", "W1")
  # Initial super-learner fits of the nuisance parameters (MOSS helper).
  sl_fit <- initial_sl_fit(
    T_tilde = df$T.tilde,
    Delta = df$Delta,
    A = df$A,
    W = data.frame(df[, W_names]),
    t_max = max(df$T.tilde),
    sl_treatment = sl_lib_g,
    sl_censoring = sl_lib_censor,
    sl_failure = sl_lib_failure
  )
  sl_fit$density_failure_1$hazard_to_survival()
  sl_fit$density_failure_0$hazard_to_survival()
  # WILSON hack no data is t_tilde = 2
  sl_fit$density_failure_1$t <- k_grid
  sl_fit$density_failure_0$t <- k_grid
  # IPCW and EE estimators evaluated at every point of the time grid,
  # once per treatment arm (A_intervene = 1 / 0).
  message("ipcw + ee")
  ipcw_fit_1_all <- repeat_t_grid$new(
    method = ipcw,
    A = df$A,
    T_tilde = df$T.tilde,
    Delta = df$Delta,
    density_failure = sl_fit$density_failure_1,
    density_censor = sl_fit$density_censor_1,
    g1W = sl_fit$g1W,
    A_intervene = 1
  )$fit(k_grid = k_grid)
  ipcw_fit_0_all <- repeat_t_grid$new(
    method = ipcw,
    A = df$A,
    T_tilde = df$T.tilde,
    Delta = df$Delta,
    density_failure = sl_fit$density_failure_0,
    density_censor = sl_fit$density_censor_0,
    g1W = sl_fit$g1W,
    A_intervene = 0
  )$fit(k_grid = k_grid)
  ee_fit_1_all <- repeat_t_grid$new(
    method = ee,
    A = df$A,
    T_tilde = df$T.tilde,
    Delta = df$Delta,
    density_failure = sl_fit$density_failure_1,
    density_censor = sl_fit$density_censor_1,
    g1W = sl_fit$g1W,
    A_intervene = 1
  )$fit(k_grid = k_grid)
  ee_fit_0_all <- repeat_t_grid$new(
    method = ee,
    A = df$A,
    T_tilde = df$T.tilde,
    Delta = df$Delta,
    density_failure = sl_fit$density_failure_0,
    density_censor = sl_fit$density_censor_0,
    g1W = sl_fit$g1W,
    A_intervene = 0
  )$fit(k_grid = k_grid)
  ipcw_fit_1 <- survival_curve$new(t = k_grid, survival = ipcw_fit_1_all)
  ipcw_fit_0 <- survival_curve$new(t = k_grid, survival = ipcw_fit_0_all)
  ee_fit_1 <- survival_curve$new(t = k_grid, survival = ee_fit_1_all)
  ee_fit_0 <- survival_curve$new(t = k_grid, survival = ee_fit_0_all)
  # gg_sl <- ggarrange(
  #   sl_fit$density_failure_1$display(type = "survival", W = df$W),
  #   sl_fit$density_failure_0$display(type = "survival", W = df$W),
  #   ncol = 2,
  #   labels = "AUTO",
  #   common.legend = TRUE,
  #   legend = "bottom"
  # )
  # Marginal (averaged over W) super-learner survival curves.
  sl_density_failure_1_marginal <- sl_fit$density_failure_1$clone(deep = TRUE)
  sl_density_failure_0_marginal <- sl_fit$density_failure_0$clone(deep = TRUE)
  sl_density_failure_1_marginal$survival <- matrix(colMeans(sl_density_failure_1_marginal$survival), nrow = 1)
  sl_density_failure_0_marginal$survival <- matrix(colMeans(sl_density_failure_0_marginal$survival), nrow = 1)
  # gg_sl2 <- ggarrange(
  #   sl_density_failure_1_marginal$display(type = "survival"),
  #   sl_density_failure_0_marginal$display(type = "survival"),
  #   ncol = 2,
  #   labels = "AUTO",
  #   common.legend = TRUE,
  #   legend = "bottom"
  # )
  # gg_ipcw <- ggarrange(
  #   ipcw_fit_1$display(type = "survival"),
  #   ipcw_fit_0$display(type = "survival"),
  #   ncol = 2,
  #   labels = "AUTO",
  #   common.legend = TRUE,
  #   legend = "bottom"
  # )
  # gg_ee <- ggarrange(
  #   ee_fit_1$display(type = "survival"),
  #   ee_fit_0$display(type = "survival"),
  #   ncol = 2,
  #   labels = "AUTO",
  #   common.legend = TRUE,
  #   legend = "bottom"
  # )
  # One-step MOSS estimator (classic survival-scale submodel), per arm.
  message("moss classic")
  moss_fit <- MOSS$new(
    A = df$A,
    T_tilde = df$T.tilde,
    Delta = df$Delta,
    density_failure = sl_fit$density_failure_1,
    density_censor = sl_fit$density_censor_1,
    g1W = sl_fit$g1W,
    A_intervene = 1,
    k_grid = k_grid
  )
  # (argument name "max_num_interation" is the MOSS package's own spelling)
  psi_moss_1 <- moss_fit$onestep_curve(
    epsilon = 1e-3,
    max_num_interation = 1e2,
    verbose = TRUE
  )
  moss_fit <- MOSS$new(
    A = df$A,
    T_tilde = df$T.tilde,
    Delta = df$Delta,
    density_failure = sl_fit$density_failure_0,
    density_censor = sl_fit$density_censor_0,
    g1W = sl_fit$g1W,
    A_intervene = 0,
    k_grid = k_grid
  )
  psi_moss_0 <- moss_fit$onestep_curve(
    epsilon = 1e-3,
    max_num_interation = 1e2,
    verbose = TRUE
  )
  moss_fit_1 <- survival_curve$new(t = k_grid, survival = psi_moss_1)
  moss_fit_0 <- survival_curve$new(t = k_grid, survival = psi_moss_0)
  # Hazard-scale MOSS with l2 (ridge) and l1 (lasso) submodels, treated arm only.
  message("moss with l2 submodel")
  moss_hazard_l2 <- MOSS_hazard$new(
    A = df$A,
    T_tilde = df$T.tilde,
    Delta = df$Delta,
    density_failure = sl_fit$density_failure_1,
    density_censor = sl_fit$density_censor_1,
    g1W = sl_fit$g1W,
    A_intervene = 1,
    k_grid = k_grid
  )
  moss_hazard_l1 <- moss_hazard_l2$clone(deep = TRUE)
  psi_moss_l2_1 <- moss_hazard_l2$iterate_onestep(
    method = "l2", epsilon = 1e-1 / sqrt(n_sim), verbose = FALSE
  )
  moss_hazard_l2_1 <- survival_curve$new(t = k_grid, survival = psi_moss_l2_1)
  psi_moss_hazard_l1_1 <- moss_hazard_l1$iterate_onestep(
    method = "l1", epsilon = 1e-1 / sqrt(n_sim), verbose = FALSE
  )
  moss_hazard_l1_1 <- survival_curve$new(t = k_grid, survival = psi_moss_hazard_l1_1)
  # NOTE(review): the two bare expressions below don't auto-print inside a
  # function; they are effectively no-ops (probably interactive leftovers).
  moss_hazard_l2_1
  moss_hazard_l1_1
  # Iterative TMLE via survtmle; on error fall back to the SL marginal curves.
  message("tmle")
  tmle_fit <- tryCatch({
    tmle_fit <- fit_survtmle(
      T.tilde = df$T.tilde,
      Delta = df$Delta,
      A = df$A,
      W_df = data.frame(df[, c("W", "W1")]),
      SL.trt = sl_lib_g,
      SL.ctime = sl_lib_censor,
      SL.ftime = sl_lib_failure
    )
  },
  error = function(cond) {
    message("tmle error")
    NULL
  }
  )
  if (is.null(tmle_fit)) {
    tmle_fit_1 <- sl_density_failure_1_marginal$clone(deep = TRUE)
    tmle_fit_0 <- sl_density_failure_0_marginal$clone(deep = TRUE)
  } else {
    tmle_fit_1 <- survival_curve$new(t = k_grid, survival = tmle_fit$s_1)
    tmle_fit_0 <- survival_curve$new(t = k_grid, survival = tmle_fit$s_0)
  }
  survival_truth_1 <- survival_curve$new(t = k_grid, survival = simulated$true_surv1(k_grid))
  survival_truth_0 <- survival_curve$new(t = k_grid, survival = simulated$true_surv0(k_grid))
  # A valid survival curve must be non-increasing in t.
  is_monotone_tmle <- all(diff(as.numeric(tmle_fit_1$survival)) <= 0)
  is_monotone_ipcw <- all(diff(as.numeric(ipcw_fit_1$survival)) <= 0)
  is_monotone_ee <- all(diff(as.numeric(ee_fit_1$survival)) <= 0)
  # Stack all treated-arm curves into one long ggplot-ready data frame.
  df_curve_sl1 <- sl_density_failure_1_marginal$create_ggplot_df()
  df_curve_tmle1 <- tmle_fit_1$create_ggplot_df()
  df_curve_moss1 <- moss_fit_1$create_ggplot_df()
  df_curve_moss_l11 <- moss_hazard_l1_1$create_ggplot_df()
  df_curve_moss_l21 <- moss_hazard_l2_1$create_ggplot_df()
  df_curve_km1 <- km_fit_1$create_ggplot_df()
  df_curve_ipcw1 <- ipcw_fit_1$create_ggplot_df()
  df_curve_ee1 <- ee_fit_1$create_ggplot_df()
  df_curve_sl1$method <- "super learner"
  df_curve_tmle1$method <- "TMLE"
  df_curve_moss1$method <- "MOSS_classic"
  df_curve_moss_l11$method <- "MOSS_l1"
  df_curve_moss_l21$method <- "MOSS_l2"
  df_curve_km1$method <- "KM"
  df_curve_ipcw1$method <- "IPCW"
  df_curve_ee1$method <- "EE"
  df_curve <- rbind(
    df_curve_sl1,
    df_curve_tmle1,
    df_curve_moss1,
    df_curve_moss_l11,
    df_curve_moss_l21,
    df_curve_km1,
    df_curve_ipcw1,
    df_curve_ee1
  )
  # Keep only replicates where both TMLE and EE produced non-monotone curves.
  if (!is_monotone_tmle & !is_monotone_ee) {
    return(df_curve)
  } else {
    return(NULL)
  }
}
# ---- Simulation driver ----
# Repeat do_once() N_SIMULATION times per sample size on a socket cluster,
# keep the survival-curve data frames it returns (the non-monotone cases),
# relabel the estimators, and render one survival-curve panel per replicate.
N_SIMULATION <- 5e1
library(foreach)
library(doSNOW)
library(tcltk)
# FIX: detectCores() is exported by parallel, so ':::' was unnecessary.
nw <- parallel::detectCores()
cl <- makeSOCKcluster(nw)
registerDoSNOW(cl)
n_sim_grid <- c(1e2)
# n_sim_grid <- c(1e3)
# Nested foreach: outer over sample sizes, inner over Monte Carlo replicates;
# errors are dropped (.errorhandling = "remove") rather than aborting the run.
df_metric <- foreach(
  n_sim = n_sim_grid,
  .combine = rbind,
  .packages = c("R6", "MOSS", "survtmle", "survival"),
  .inorder = FALSE,
  .errorhandling = "remove",
  .verbose = TRUE
) %:%
  foreach(it2 = 1:N_SIMULATION, .combine = rbind, .errorhandling = "remove") %dopar% {
    df <- do_once(n_sim = n_sim)
    if (!is.null(df)) {
      df$id_mcmc <- it2
      return(df)
    } else {
      return(NULL)
    }
  }
unique(df_metric$id_mcmc)
gglist <- list()
# Relabel estimators for plotting and fix the legend order.
# FIX: removed the trailing comma inside recode() — an empty argument in `...`
# is a runtime error ("argument ... is empty") in R.
df_metric <- df_metric %>%
  filter(method != "MOSS_classic") %>%
  mutate(method = recode(
    method,
    TMLE = "iter. TMLE",
    MOSS_l1 = "OS TMLE (lasso)",
    MOSS_l2 = "OS TMLE (ridge)",
    "super learner" = "Super learner"
  )) %>% mutate(method = factor(
    method,
    levels = c("KM", "Super learner", "IPCW", "EE", "iter. TMLE", "OS TMLE (ridge)", "OS TMLE (lasso)"))
  )
# One plot per Monte Carlo replicate.
for (idx in unique(df_metric$id_mcmc)) {
  gg <- ggplot(df_metric %>% filter(id_mcmc == idx), aes(t, s, color = method)) +
    geom_line() +
    ylab("Survival probability") +
    labs(color = "Method") +
    guides(color = guide_legend(nrow = 4)) +
    theme_bw() +
    theme(legend.position = "bottom")
  gglist <- c(gglist, list(gg))
}
gg_panel <- ggarrange(plotlist = gglist, legend = "bottom")
ggpubr::ggexport(plotlist = gglist, filename = "panel.pdf", width = 4, height = 4)
# Release the cluster workers.
# closeCluster(cl)
# mpi.quit()
stopCluster(cl)
|
e806b02f6be4b856a85695077edbaf6432df2d7d
|
af96e7785b76034c2c167545b6f174e6bfe4fb85
|
/school_clustering/pca_clustering_schools.R
|
835cdb1a2b3e0a1fd7efb796973a8b5f1b070357
|
[] |
no_license
|
Nhiemth1985/rviz
|
8c4d0db2f8639b1a985c1e189987545a97e41938
|
c959319db2597d3adadbcf3fcba08694ce5f31cf
|
refs/heads/master
| 2022-12-06T03:32:10.254564
| 2020-08-28T16:01:03
| 2020-08-28T16:01:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,823
|
r
|
pca_clustering_schools.R
|
# Session setup for the school-clustering analysis.
# NOTE(review): clearing the workspace and hard-coding setwd() make this script
# non-portable; the relative read_csv() path below depends on this directory.
remove(list = ls())
options(stringsAsFactors = FALSE)
options(scipen = 999)
setwd("/Users/harrocyranka/Desktop/rviz/school_clustering/")
library(tidyverse);library(readxl)
# School-level data with precomputed cluster labels; keep only the four
# numeric features used for k-means and PCA below.
x <- read_csv("dataset_with_clusters.csv") %>%
  select(pct_school_lunch, median_hh_income, pct_minority, enrollment)
# Z-score a numeric vector: center on its mean, divide by its sample standard
# deviation, and round the result to 3 decimal places.
standardize <- function(my_list){
  z_scores <- (my_list - mean(my_list)) / sd(my_list)
  round(z_scores, 3)
}
## Fit k-means (k = 6) on the standardized features and attach the labels.
set.seed(1234)
k_fit <- kmeans(x %>% mutate_all(standardize),6, nstart = 25)
x <- x %>% mutate(cluster = k_fit$cluster)
## Per-cluster summary statistics (percentages scaled to 0-100).
cluster_statistics <- x %>% group_by(cluster) %>%
  dplyr::summarise(avg_school_lunch = 100*mean(pct_school_lunch),
                   avg_median_hh = mean(median_hh_income),
                   avg_non_white = 100*mean(pct_minority),
                   avg_enrollment = mean(enrollment),
                   school_number = n()) %>%
  magrittr::set_colnames(c("Cluster", "Mean % school lunch",
                           "Mean zip median household income",
                           "Mean % non-white",
                           "Mean enrollment",
                           "Total schools"))
## PCA on the four (scaled) features; scree plot of explained variance.
pca <- prcomp(x %>% select(-cluster),center = TRUE, scale = TRUE)
explained_variance <- tibble(component = 1:4, explained_variance = round(pca$sdev^2/sum(pca$sdev^2)*100,1))
explained_variance %>% ggplot(aes(x = component, y = round(explained_variance,2), label= explained_variance)) + geom_col() +
  theme_minimal() + geom_label() +
  labs(x = "Component", y= "Explained Variance in %",title = "Explained variance for each component")
# Scores on the first two components, colored by cluster assignment.
pca_tibble <- tibble(pc1 = pca$x[,1], pc2 = pca$x[,2]) %>%
  mutate(cluster = x$cluster)
pca_tibble %>% ggplot(aes(x = pc1, y = pc2, color = as.character(cluster))) +
  geom_point(size = 0.5) + theme_bw() +
  labs(x = "First Principal Component", y = "Second Principal Component", color = "Cluster assignment",
       title = "Scores for the first two principal components")
round(pca$rotation,2)
cluster_statistics
## New York City schools (five boroughs) exported for Tableau.
ny_list <- read_csv("dataset_with_clusters.csv") %>%
  mutate(cluster = x$cluster) %>%
  filter(county %in% c("New York County", "Kings County", "Queens County", "Bronx County",
                       "Richmond County") & lstate == "NY") %>%
  dplyr::select(latcod, loncod, cluster, enrollment) %>%
  dplyr::rename(lat = latcod, lon = loncod)
write_csv(ny_list, "new_york_school_list.csv")
## Chicago (Cook County) schools exported for Tableau.
chicago <- read_csv("dataset_with_clusters.csv") %>%
  mutate(cluster = x$cluster) %>%
  filter(county %in% c("Cook County") & lstate == "IL") %>%
  dplyr::select(latcod, loncod, cluster, enrollment) %>%
  dplyr::rename(lat = latcod, lon = loncod)
write_csv(chicago, "chicago_school_list.csv")
## Faceted bar chart of the per-cluster summary statistics.
cluster_statistics %>%
  select(-`Total schools`) %>%
  mutate(`Mean zip median household income` = round(`Mean zip median household income`,0),
         `Mean enrollment` = round(`Mean enrollment`,0),
         `Mean % school lunch` = round(`Mean % school lunch`,1),
         `Mean % non-white` = round(`Mean % non-white`,1)) %>%
  gather(measure, total,-Cluster) %>%
  arrange(Cluster) %>%
  ggplot(aes(x = as.character(Cluster), y = total, fill = as.character(Cluster),label = round(total,2))) +
  geom_col(show.legend = FALSE) + geom_label(show.legend = FALSE, fill = "white", size = 3) +
  facet_wrap(~measure, scales = "free", ncol = 2) + theme_bw() +
  labs(x = "Cluster", title = "Cluster statistics", y = "Total",
       subtitle = "Data on 25,000 public schools",
       caption = "Source: Department of Education")
####Create Cluster Map####
library(shiny)
library(plotly)
# Build an interactive plotly map (Albers USA projection) of the schools in the
# requested clusters. Marker size tracks enrollment; hover text shows the
# school name and cluster id. Relies on the global `x` for cluster labels.
#
# cluster_vector: cluster ids to display, e.g. c(2, 3).
# colors_vector:  one color per requested cluster, in the same order.
create_cluster_map <- function(cluster_vector, colors_vector){
  school_pts <- read_csv("dataset_with_clusters.csv") %>%
    mutate(cluster = x$cluster) %>%
    filter(cluster %in% cluster_vector) %>%
    dplyr::select(latcod, loncod, cluster, enrollment, school_name)

  # Base-map layout options.
  geo_opts <- list(
    scope = 'usa',
    projection = list(type = 'albers usa'),
    showland = TRUE,
    landcolor = toRGB("white"),
    subunitwidth = 1,
    countrywidth = 1,
    subunitcolor = toRGB("black"),
    countrycolor = toRGB("black")
  )

  fig <- plot_geo(school_pts, locationmode = 'USA-states', sizes = c(2, 20)) %>%
    add_markers(
      x = ~loncod, y = ~latcod, size = ~enrollment, color = ~cluster,
      colors = colors_vector, hoverinfo = "text", mode = "markers",
      text = ~paste('School Name: ',school_name,
                    '<br> Cluster: ',cluster)
    ) %>%
    layout(title = "US Map: Clusters 2 and 3", geo = geo_opts,
           showlegend = FALSE, width = 2000, height = 1000) %>%
    hide_colorbar()
  fig
}
y <- create_cluster_map(c(2, 3), c("goldenrod", "forestgreen"))
y
|
d010f01d4a8b4250c094eae3662193e11f89457d
|
26b7fb893d70c2aae8666248bee804c2ea159d11
|
/man/aveMatFac.Rd
|
f8cb483ef300b8afdd6ffb4d0f917b49ff198a5d
|
[] |
no_license
|
wefang/ghelper
|
5b77b4067f2ec8633a7600b2f745e884c17b68bf
|
7e150a12e6c1d5801cd46122edf9cd118721086a
|
refs/heads/master
| 2021-01-20T11:38:53.028888
| 2020-11-13T03:43:58
| 2020-11-13T03:43:58
| 56,557,407
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 252
|
rd
|
aveMatFac.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper.R
\name{aveMatFac}
\alias{aveMatFac}
\title{Average matrix rows based on a factor}
\usage{
aveMatFac(mat, fac)
}
\description{
Average matrix rows based on a factor
}
|
9a91a2b0cc87d2baf661503e91252f59b0fe9880
|
91faa6d30c4ec3f62a19facf1b9fddaa5f95707f
|
/QTL/SHOREmap_qtlseq.R
|
481cb1cf054ed2cb154da7e93f264a7e389ce69a
|
[] |
no_license
|
zzygyx9119/shoremap
|
ae546715e830b17391410aece21958b7243e0a4b
|
374c45ff5894c4a012ffcce71191993a6e043d27
|
refs/heads/master
| 2016-08-08T17:52:29.591383
| 2012-12-18T08:46:56
| 2012-12-18T08:46:56
| 50,601,631
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,438
|
r
|
SHOREmap_qtlseq.R
|
#v09 - changes in ploting
#v10 - added the true interval of the qtl
#v11 - removed bootstrapping
#v12 - fixed output and corrected thresholds
#v13 - return of the bootstrap
library(bbmle)
source("/projects/dep_coupland/grp_nordstrom/projects/Lotus/Simulation/PopSimulatorRIL/Rcode/SHOREmap_qtlseq_lib.R")
#Run.....
# Command-line arguments:
#   3 args: high_file low_file outprefix            (no truth file)
#   4 args: high_file low_file qtl_file outprefix   (with simulated-QTL truth)
args <- commandArgs(trailingOnly = TRUE)
high_file <- args[1]
low_file <- args[2]
if (length(args) == 4) {
  qtl_file <- args[3]
  outprefix <- args[4]
} else if (length(args) == 3) {
  outprefix <- args[3]
  qtl_file <- ""
} else {
  # FIX: cat(), not print() — print() shows the "\n" escapes literally
  # instead of rendering the usage message on separate lines.
  cat("Usage:\nR --args seqfile1 seqfile2 outprefix < SHOREmap_qtlseq.R\nor\nR --args seqfile1 seqfile2 qtlfile outprefix < SHOREmap_qtlseq.R\n")
  # NOTE(review): shells truncate exit statuses to 0-255, so 616 reaches the
  # caller as 104; kept as-is to preserve the original behavior.
  q("no", 616, FALSE)
}
# Tuning parameters (window sizes in bp; coverage bounds filter markers).
tolerance=2e6
winSize<-1000000
winStep<-10000
p.valueCutoff<-0.2
minMarkersInWindow<-10
minCoverage <- 0
maxCoverage <- 3000000
minFrequencyChange<-0.05
minAbsolutePeak<-0.05
bootstrap=1000
# Load the high- and low-pool allele-count tables.
# Expected columns (from the indexing below): 1 = chromosome, 2 = position,
# 3:5 = allele counts — TODO confirm against the SHOREmap input format.
hs<-read.table(high_file)
ls<-read.table(low_file)
# Keep only markers present in both pools: encode (chr, pos) as a single
# integer chr*chrmod + pos, where chrmod exceeds the largest position.
chrmod<-10**ceiling(log10(max(hs[,2],ls[,2])))
hs.mod<-hs[,1]*chrmod+hs[,2]
ls.mod<-ls[,1]*chrmod+ls[,2]
intersect.mod<-intersect(hs.mod,ls.mod)
hs<-hs[hs.mod %in% intersect.mod,]
ls<-ls[ls.mod %in% intersect.mod,]
# Drop markers whose total coverage falls outside [minCoverage, maxCoverage]
# in either pool.
hs.cov<-rowSums(hs[,3:5])
ls.cov<-rowSums(ls[,3:5])
goodCov<-(hs.cov>minCoverage & hs.cov<maxCoverage)&(ls.cov>minCoverage &ls.cov<maxCoverage)
hs<-hs[goodCov,]
ls<-ls[goodCov,]
# True-QTL table: a (-1, -1) placeholder when no truth file was supplied.
qtls<-data.frame(pos=-1,chr=-1)
if(qtl_file==""){
qtls<-data.frame(pos=-1,chr=-1)
}else{
# Read the simulated truth (QTL list plus per-pool allele frequencies) and
# derive, for each QTL, the maximal interval over which the high-low
# frequency difference stays constant (trueStart / trueEnd).
qtls<-read.table(paste(qtl_file,".qtl.txt",sep=""))
ls.t<-read.table(paste(qtl_file,".low.AF.txt",sep=""))
hs.t<-read.table(paste(qtl_file,".high.AF.txt",sep=""))
a.t<-cbind(ls.t[,1],ls.t[,2],(hs.t[,3]-ls.t[,3])/100)
marker<-qtls[,2]+1
freqDiff<-a.t[marker,3]
# Walk left while the frequency difference is unchanged.
trueStart<-sapply(1:length(freqDiff),function(i){
start<-marker[i]
while(a.t[start,3]==freqDiff[i]&&start>1){
start<-start-1
}
a.t[start,2]+1
})
# Walk right while the frequency difference is unchanged.
trueEnd<-sapply(1:length(freqDiff),function(i){
end<-marker[i]
while(a.t[end,3]==freqDiff[i]&& end<nrow(a.t)){
end<-end+1
}
a.t[end,2]-1
})
# Rank QTLs by absolute effect size (column 5).
rank<-sort(sort(abs(qtls[,5]),index.return=TRUE,decreasing=TRUE)$ix,index.return=TRUE)$ix
# Linkage group: the chromosome id when several QTLs share it, else 0;
# lgType encodes the sign balance of the co-located QTL effects.
lg<-sapply(qtls[,3],function(x) ifelse(sum(qtls[,3]==x)>1,x,0))
lgType<-sapply(lg,function(x) ifelse(x==0,0,{
tt<-table(sign(qtls[lg==x,5]))
ifelse(length(tt)==1,sign(sum(qtls[lg==x,5])),ifelse(tt[1]<tt[2],-tt[1]/tt[2],ifelse(tt[1]==tt[2],0,tt[2]/tt[1])))
}))
qtls<-data.frame(id=qtls[,1],pos=qtls[,4],chr=qtls[,3],effect=qtls[,5],rank=rank,lg=lg,lgType=lgType,trueFreqDiff=freqDiff,trueStart=trueStart,trueEnd=trueEnd)
}
## Merge the two pools into one marker table:
## cols 1-2 chr/pos, 3-6 high-pool freq+counts, 7-10 low-pool freq+counts,
## 11 = high-low frequency difference, 12 = p (disabled, all zeros).
hs.freq<-hs[,3]/rowSums(hs[,3:5])
ls.freq<-ls[,3]/rowSums(ls[,3:5])
data<- cbind(hs[,1:2],hs.freq,hs[,3:5],ls.freq,ls[,3:5],hs.freq-ls.freq)
#p<-apply(data,1,function(x) fisher.test(matrix(as.numeric(x[c(4,5,8,9)]),nrow=2))$p.value)
p<-rep(0,nrow(data))
data<-cbind(data,p)
shifts<-seq(0,winSize-1,winStep)
# Bootstrap null distribution of peak heights: per chromosome, randomly flip
# the sign of each marker's frequency difference, rescore the windows
# (windowedScore / predictAllPeaks come from SHOREmap_qtlseq_lib.R), and
# collect (absolute peak, boundary height) pairs.
cutOffs<-matrix(c(sapply(unique(data[,1]),function(chr){
data2<-data[data[,1]==chr,]
nrOfMarkers<-nrow(data2)
nrInWindow<-round(nrOfMarkers/max(data2[,2])*winSize)
replicate(bootstrap,{
p<-data2[,2]
i<-sample(1:(nrOfMarkers-nrInWindow+1),size=ceiling(nrOfMarkers/nrInWindow),replace=TRUE)
i<-sapply(i,function(x) x:(x+nrInWindow-1))[1:nrOfMarkers]
d2<-data2
d2[,11]<-d2[,11]*sample(c(1,-1),nrOfMarkers,replace=TRUE)
sorted<-windowedScore(d2,winSize,winStep,minMarkersInWindow,p.valueCutoff)
sorted<-sorted[sorted[,1]>min(data2[,2])+winSize/2 & sorted[,1]<max(data2[,2])-winSize/2,]
peaks<-predictAllPeaks(sorted,winSize,FALSE)
#lines(sorted[,c(1,4)],col="red")
rbind(peaks[,2],pmax(peaks[,3],peaks[,4]))
},simplify=TRUE)
}),recursive=TRUE),recursive=TRUE
#cutOffs<- matrix(rep(c(minAbsolutePeak,minFrequencyChange),10000),ncol=2,byrow=TRUE)
# Use the bootstrap 99th percentile as the significance threshold, but never
# go below the configured minimums.
cutOffs<-cbind(sort(abs(cutOffs[,1])),sort(cutOffs[,2]))
minAbsolutePeak<-max(cutOffs[round(nrow(cutOffs)*0.99),1],minAbsolutePeak)
minFrequencyChange<-max(cutOffs[round(nrow(cutOffs)*0.99),2],minFrequencyChange)
# Empty 5-column result container for the interval estimates.
estimates<- matrix(ncol=5)[FALSE,]
colnames(estimates)<-c("chr","start","stop","freqDiff","Est")
nrOfChrs<-length(unique(data[,1]))
# One panel per chromosome: scatter of per-marker frequency differences, the
# smoothed window score, significant peak regions, point estimates, and (when
# a truth file was given) the true QTL positions colored by effect size.
pdf(paste(outprefix,".plots.pdf",sep=""))
par(mfrow=c(ceiling(nrOfChrs/2),2))
# Blue-to-yellow palette encoding QTL effect sizes from -50 to +50.
colors<-rainbow(50,start=4/6,end=1/6)
for(chr in unique(data[,1])){
data2<-data[data[,1]==chr,]
# png()
plot(data2[,2],data2[,11],main=paste("chr",chr),xlab="pos",ylab="frequency difference",ylim=c(-1.2,1),pch=16,cex=0.75,col="lightsteelblue3")
# Windowed score curve (helpers sourced from SHOREmap_qtlseq_lib.R); drop
# half-window margins at the chromosome ends.
sorted<-windowedScore(data2,winSize,winStep,minMarkersInWindow,p.valueCutoff)
sorted<-sorted[sorted[,1]>min(data2[,2])+winSize/2 & sorted[,1]<max(data2[,2])-winSize/2,]
lines(sorted[,c(1,4)],col="limegreen")
# Candidate peaks above the (bootstrap-calibrated) thresholds.
peaks<-predictPeaks(sorted,minFrequencyChange,minAbsolutePeak,cutOffs,winSize,FALSE,0.2)
if(nrow(peaks)>0){
for(peakIndex in 1:nrow(peaks)){
# Peak position and sign; boundaries of the candidate region.
peak<-peaks[peakIndex,1]
direction<-peaks[peakIndex,4]
# Globals consumed by the lib's likelihood helpers (maxConf et al.).
assign("shoremap_qtlmem",matrix(c(-1,-1,-1,-1,-1),nrow=1),".GlobalEnv")
lowerBoundary<-sorted[peaks[peakIndex,2],1]
upperBoundary<-sorted[peaks[peakIndex,3],1]
if(upperBoundary-lowerBoundary>1.5*winSize){
data3<-data2[data2[,2]>=lowerBoundary&data2[,2]<=upperBoundary,]
assign("shoremap_qtlData",data3,".GlobalEnv")
# Point estimation: over all window shifts, find the window with the
# most extreme (direction-adjusted) high-low frequency difference.
md_long<-sapply(shifts,function(shift){
windows<-floor((data3[,2]+shift)/winSize)
d<-tapply(1:length(data3[,1]),windows,function(interval){
estimateFreq_one(interval,1)-estimateFreq_one(interval,2)
})
s<-table(windows)
interval<-which(windows==unique(windows)[which.max(direction*d)])
list(md=d,best=direction*max(direction*d),interval=interval,size=s)
})
md<-c(md_long[1,],recursive=TRUE)
ms<-c(md_long[4,],recursive=TRUE)
# Window center positions matching md/ms above.
mp<-c(sapply(shifts,function(shift){
windows<-floor((data3[,2]+shift)/winSize)
# tapply(data3[,2],windows,mean)
sapply(unique(windows),function(i) (i+0.5)*winSize-shift)
}),recursive=TRUE)
wins<-t(sapply(sort(mp,index.return=TRUE)$ix,function(i) c(mp[i],md[i],ms[i])))
wins<-wins[wins[,1]>lowerBoundary+winSize/2 & wins[,1]<upperBoundary-winSize/2 & wins[,3]>minMarkersInWindow,1:2]
#lines(wins[,1],wins[,2],col="violetred")
# Best window and its (truncated, absolute) frequency difference.
minIndex<-which.max(direction*wins[,2])
minDiff<-wins[minIndex,2]
minDiff<-abs(floor(minDiff*1000)/1000)
# interval<-md_long[3,which(c(md_long[2,],recursive=TRUE)==wins[minIndex,2])[1]]$interval
interval<-which(data3[,2]>=wins[minIndex,1]-winSize/2 & data3[,2]<wins[minIndex,1]+winSize/2)
roughEst<-round(mean(data3[interval,2]))
# Shrink the marker interval by repeated optimization of maxConf until
# the objective stops improving (99% confidence region).
opt<-optim(fn=maxConf,par=c(min(interval),max(interval)),minDiff=minDiff,level=0.99)
bestValue<-Inf
while(opt$value<bestValue){
bestValue<-opt$value
opt<-optim(fn=maxConf,par=c(floor(opt$par[1]),floor(opt$par[2])),minDiff=minDiff,level=0.99)
}
# Draw the confidence interval (bar at the top) and the point estimate.
rect(data3[opt$par[1],2],0.96,data3[opt$par[2],2],1,col="limegreen",border="limegreen")
est<-round(optim(par=roughEst,fn=optimFn,gr=optimGr,winSize=winSize,low=data3[opt$par[1],2],high=data3[opt$par[2],2],direction=direction)$par[1])
abline(v=est,col="limegreen")
estimates<-rbind(estimates,c(chr,data3[opt$par[1],2],data3[opt$par[2],2],minDiff,est))
}
}
}
# Mark the true QTL positions, color-coded by effect (clamped to [-50, 50]).
if(sum(qtls$chr==chr)>0){
apply(qtls[qtls$chr==chr,],1,function(x) {
e<-min(50,x[4])
e<-max(-50,e)
e<-round((e+50)/2)
abline(v=x[2],col=colors[e])
})
}
# Draw the effect-size color scale below the data.
chrStart<-min(data2[,2])
chrEnd<-max(data2[,2])
chrSize<-chrEnd-chrStart
scaleStart<-chrStart+chrSize/5
scaleEnd<-chrEnd-chrSize/5
scaleSize<-scaleEnd-scaleStart
dx<-scaleSize/50
starts<-seq(scaleStart,scaleEnd-1,dx)
rect(starts, -1, starts+0.9*dx, -1.1, col = colors, border = "grey");
text(scaleStart+scaleSize/2,-0.95,"effects")
text(scaleStart+scaleSize/2,-1.15,"0")
text(starts[1],-1.15,"-50")
text(max(starts)+0.9*dx,-1.15,"50")
}
dev.off()
# Classify every (true QTL, estimated interval) combination per chromosome as
# TP / FP / FN / TN and write the table to <outprefix>.qtlEstimates.csv.
# Output columns: chr, the QTL truth columns, the estimate columns, a
# judgement (TP/FP/FN/TN) and a spec ("close" marks an FP within `tolerance`
# of a true QTL).
header<-c("chr",colnames(qtls)[c(1:2,4:ncol(qtls))],colnames(estimates)[c(2:ncol(estimates))],"judgement","spec")
header[1]<-paste("#",header[1],sep="")
qhc<-ncol(qtls)-1
ehc<-ncol(estimates)-1
toPrint<-sapply(unique(data[,1]),function(chr){
cq<-sum(qtls$chr==chr)
ce<-sum(estimates[,1]==chr)
if(cq>0){
q<-matrix(c(qtls[qtls$chr==chr,c(1:2,4:ncol(qtls))],recursive=TRUE),ncol=qhc)
}else{
q<-1:qhc
}
e<-matrix(estimates[estimates[,1]==chr,c(2:ncol(estimates))],ncol=ehc)
if(cq>0 && ce>0){
# TP: the true QTL position falls inside the estimated interval.
# Pairs enumerates every (QTL index, estimate index) combination.
pairs<-combn(cq+ce,2)
pairs<-matrix(pairs[,pairs[1,]<=cq & pairs[2,]>cq],nrow=2)
pairs[2,]<-pairs[2,]-cq
tp<-apply(pairs,2,function(x){
q[x[1],2]>=e[x[2],1] && q[x[1],2]<=e[x[2],2]
})
toPrint<-matrix(apply(matrix(pairs[,tp],nrow=2),2,function(x) c(chr,q[x[1],],e[x[2],],"TP","")),byrow=TRUE,ncol=length(header))
# FP "close": true QTL within `tolerance` of the interval but not inside it.
# (NOTE(review): byrow=T / row.names=F use the reassignable T/F aliases.)
close<-apply(pairs,2,function(x){
q[x[1],2]+tolerance>=e[x[2],1] && q[x[1],2]-tolerance<=e[x[2],2]
})
toPrint<-rbind(toPrint,matrix(apply(matrix(pairs[,close &!tp],nrow=2),2,function(x) c(chr,q[x[1],],e[x[2],],"FP","close")),byrow=T,ncol=length(header)))
# Remaining FP: estimated intervals not close to any true QTL.
for(i in 1:ce){
if(!i %in% unique(pairs[2,close]) ){
toPrint<-rbind(toPrint,matrix(c(chr,rep(NA,qhc),e[i,],"FP","" ),ncol=length(header)))
}
}
# FN: true QTLs with no nearby estimated interval.
for(i in 1:cq){
if(!i %in% unique(pairs[1,close]) ){
toPrint<-rbind(toPrint,matrix(c(chr,q[i,],rep(NA,ehc),"FN","" ),ncol=length(header)))
}
}
t(toPrint)
}else if(cq>0){
# Only true QTLs on this chromosome: all are false negatives.
t(matrix(apply(q,1,function(x) matrix(c(chr,x,rep(NA,ehc),"FN","" ),ncol=length(header))),ncol=length(header),byrow=TRUE))
}else if(ce>0){
# Only estimates on this chromosome: all are false positives.
t(matrix(apply(e,1,function(x) matrix(c(chr,rep(NA,qhc),x,"FP","" ),ncol=length(header))),ncol=length(header),byrow=TRUE))
}else{
# Neither: a single true-negative row.
t(matrix(c(chr,rep(NA,qhc+ehc),"TN","" ),ncol=length(header)))
}
})
# sapply may return a list (ragged per-chromosome results) or a matrix.
if(is.list(toPrint)){
toPrint<-do.call(rbind,sapply(toPrint,t))
}else{
toPrint<-matrix(toPrint,ncol=length(header),byrow=TRUE)
}
colnames(toPrint)<-header
write.table(toPrint,file=paste(outprefix,".qtlEstimates.csv",sep=""),row.names=F,sep="\t",quote=F,na="")
#if(length(estimates[1,])>0){
# colnames(estimates)<-c("chr","start","stop","freqDiff","roughEst")
# write.table(estimates,file=paste(outprefix,".qtlEstimates.csv",sep=""),row.names=F,sep="\t",quote=F)
#}
q("no",0,F)
|
2efb832e3a69ff0d29e39a94c8a2e6ce5d913532
|
8d1a1d9e5238e746cf093257c896912327d9d433
|
/Exploratory Data Analysis/Project2/plot4.R
|
03fe6f1d2a564de300436a64dfc9628ee8811b44
|
[] |
no_license
|
parthpandey2000/datasciencecoursera
|
5f7d31ec0c485f83f746bf753c233e0bf7f63f1d
|
5a76e3aafe793e40f8b41c49ac65b32dc68de983
|
refs/heads/master
| 2022-11-25T12:00:16.401069
| 2020-08-06T14:42:03
| 2020-08-06T14:42:03
| 274,686,644
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 701
|
r
|
plot4.R
|
# Plot 4: total annual PM2.5 emissions from coal-combustion-related sources
# in the US, one point per reporting year.
library(dplyr)
library(ggplot2)

nei_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
zip_file <- "destfile.zip"
# Download and unpack the NEI data only once.
if (!file.exists(zip_file)) {
  download.file(nei_url,
                destfile = zip_file,
                method = "curl")
  unzip(zip_file, exdir = ".")
}

NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Source classification codes whose EI.Sector mentions coal.
coal_codes <- SCC[grep("[Cc]oal", SCC$EI.Sector), ]

# Total emissions per year for coal-related SCC codes.
grp <- NEI %>%
  subset(NEI$SCC %in% coal_codes$SCC) %>%
  group_by(year) %>%
  summarize(Total = sum(Emissions, na.rm = TRUE))

png("plot4.png")
qplot(data = grp, year, Total, col = year, xlab = "Year",
      ylab = "Total Emissions [Tons]",
      main = "Total Annual Coal Combustion Emissions in the US")
dev.off()
|
5d0a6e033b769231fc4b029b36c7d02032cb3c64
|
5dbf24f17d425a49af88bd60a38996e781249411
|
/R/approx2_functions.R
|
c42403db04900f3b50c7fa7390ecf344643f0ee0
|
[] |
no_license
|
syounkin/RVsharing
|
43f3959e67a1189531b1279ed9c5e12d32bc0a95
|
6265b28d688b88ac613a3697c9e825f8e51991c7
|
refs/heads/devel
| 2020-12-18T19:28:23.132571
| 2017-06-20T14:34:21
| 2017-06-20T14:34:21
| 10,171,949
| 0
| 1
| null | 2019-07-10T19:54:06
| 2013-05-20T12:09:15
|
R
|
UTF-8
|
R
| false
| false
| 3,965
|
r
|
approx2_functions.R
|
# Utility functions for method 2 to approximate sharing probabilities in presence of unknown relationships
# By Alexandre Bureau
# 2013/06/05
infer.nalleles = function(phi,nf)
# Most likely number of distinct alleles among nf founders, given the mean
# estimated kinship phi: returns the a where the expected-kinship curve
# crosses phi from above.
{
  n.alleles <- nf:(2*nf)
  # Expected kinship for each candidate allele count.
  term.same <- (2*nf - n.alleles)*(2*nf - n.alleles - 1)/(nf*(nf - 1))
  term.mix <- 2*(n.alleles - nf)*2*(2*nf - n.alleles) /
    (2*(n.alleles - nf)*2*(2*nf - n.alleles) +
       (n.alleles - nf)*(2*(n.alleles - nf) - 1) +
       2*(2*nf - n.alleles)*(2*nf - n.alleles - 1))
  expected.phi <- 0.5*term.same + 0.25*term.mix
  gap <- expected.phi - phi
  # Keep a[i] where gap changes sign between i and i+1 (last element
  # compared against 0).
  n.alleles[gap > 0 & c(gap[-1], 0) < 0]
}
compute.phi.vec = function(nf,amin=2*nf-2)
# Expected phi_a for nf founders, one value per number of distinct alleles
# a in amin:(2*nf-1). Sequential probability computation.
{
  a.seq <- amin:(2*nf - 1)
  term.a <- (2*nf - a.seq)*(2*nf - a.seq - 1)/(nf*(nf - 1))
  term.b <- (a.seq - nf)*(2*nf - a.seq)/(nf*(nf - 1)) +
    2*(a.seq - nf)*(2*nf - a.seq)/(nf*(2*nf - 1))
  (0.5*term.a + 0.25*term.b)/(nf - 1)
}
infer.theta = function(phi,phi.vec)
# Solve for theta in the polynomial approximation of the distribution of
# the number of distinct alleles; handles polynomial orders 2 to 5.
# Arguments:
#   phi     - mean estimated kinship between founders
#   phi.vec - phi_a for a = 2*nf-ord to 2*nf-1 (ord = length(phi.vec), 2..5)
# Value: the real roots of the approximating polynomial.
{
  ord <- length(phi.vec)
  # Coefficients in increasing degree: constant phi, then
  # (phi - phi_a)/k! for k = 1..ord (phi.vec reversed so the highest-a
  # difference multiplies the linear term).
  poly.coefs <- c(phi, (phi - phi.vec)[ord:1] / factorial(1:ord))
  roots <- polyroot(poly.coefs)
  # Discard roots with a non-negligible imaginary part.
  Re(roots[abs(Im(roots)) < 1e-10])
}
# Special cases of infer.theta which are no longer needed
infer.theta.quadratic = function(phi,phi.vec)
# Closed-form theta when the distribution of a spans 2*nf-4 to 2*nf.
# phi: mean estimated kinship; phi.vec: phi_a for a = 2*nf-2 and 2*nf-1.
{
  d <- phi - phi.vec
  # Quadratic-formula root of (d[1]/2)*x^2 + d[2]*x + phi = 0.
  (-d[2] - sqrt(d[2]^2 - 2 * d[1] * phi)) / d[1]
}
infer.theta.cubic = function(phi,phi.vec)
# Theta when the distribution of a spans 2*nf-3 to 2*nf.
# phi: mean estimated kinship; phi.vec: phi_a for a = 2*nf-3 to 2*nf-1.
{
  d <- phi - phi.vec
  # Cubic with increasing-degree coefficients phi, d3, d2/2, d1/6.
  roots <- polyroot(c(phi, d[3], d[2] / 2, d[1] / 6))
  # Return only the real roots.
  Re(roots[abs(Im(roots)) < 1e-10])
}
infer.theta.order4 = function(phi,phi.vec)
# Theta when the distribution of a spans 2*nf-4 to 2*nf.
# phi: mean estimated kinship; phi.vec: phi_a for a = 2*nf-4 to 2*nf-1.
{
  d <- phi - phi.vec
  # Quartic with increasing-degree coefficients phi, d4, d3/2, d2/6, d1/24.
  roots <- polyroot(c(phi, d[4], d[3] / 2, d[2] / 6, d[1] / 24))
  # Return only the real roots.
  Re(roots[abs(Im(roots)) < 1e-10])
}
infer.theta.order5 = function(phi,phi.vec)
# Theta when the distribution of a spans 2*nf-5 to 2*nf.
# phi: mean estimated kinship; phi.vec: phi_a for a = 2*nf-5 to 2*nf-1.
{
  d <- phi - phi.vec
  # Quintic with increasing-degree coefficients phi, d5, d4/2, ..., d1/120.
  roots <- polyroot(c(phi, d[5], d[4] / 2, d[3] / 6, d[2] / 24, d[1] / 120))
  # Return only the real roots.
  Re(roots[abs(Im(roots)) < 1e-10])
}
get.LODallshare <- function(vec,pshare)
# Sum the mlog10pshare values of the pedigrees in `vec`; NA when none of
# pshare$ped.tocompute.vec matches.
{
  hits <- pshare$ped.tocompute.vec %in% vec
  if (!any(hits)) return(NA)
  sum(pshare$mlog10pshare[hits])
}
# Wrapper around RVsharing() for pedigree objects carrying id/findex/mindex.
# Returns only the pshare component of the RVsharing result.
RVsharing.ped.pshare = function(ped)
{
  subj.id <- ped$id
  father.id <- mother.id <- numeric(length(subj.id))
  # findex/mindex index into ped$id; entries of 0 mean no parent recorded,
  # leaving the corresponding parent id at 0.
  father.id[ped$findex > 0] <- ped$id[ped$findex]
  mother.id[ped$mindex > 0] <- ped$id[ped$mindex]
  RVsharing(subj.id, father.id, mother.id)$pshare
}
# Same translation as RVsharing.ped.pshare, but returns the full
# RVsharing object.
RVsharing.ped = function(ped)
{
  subj.id <- ped$id
  father.id <- mother.id <- numeric(length(subj.id))
  # Entries of 0 in findex/mindex mean no parent recorded.
  father.id[ped$findex > 0] <- ped$id[ped$findex]
  mother.id[ped$mindex > 0] <- ped$id[ped$mindex]
  RVsharing(subj.id, father.id, mother.id)
}
|
f5e492d458957eda5635b298ba359c295111a99c
|
e36370dd2c0041b12077a99890a96b48a1092398
|
/plot4.R
|
c9cb3b0cb3653e837ba2848abf9b2685fe33147d
|
[] |
no_license
|
zekaih/ExData_Plotting1
|
5ccb1bbce7d33e139cd4040374b76f8e10957ee1
|
9133c40c5341adfddc637f40d936bc6360decc2a
|
refs/heads/master
| 2021-01-22T18:44:01.441502
| 2017-09-06T23:56:29
| 2017-09-06T23:56:29
| 102,411,695
| 0
| 0
| null | 2017-09-04T23:42:01
| 2017-09-04T23:42:00
| null |
UTF-8
|
R
| false
| false
| 990
|
r
|
plot4.R
|
# Exploratory plot 4: 2x2 panel of household power measurements for
# 2007-02-01 and 2007-02-02 from the UCI household power data set.
power <- read.table("household_power_consumption.txt", sep = ";",
                    header = TRUE, na.strings = "?")
str(power)
summary(power)
# Drop rows with any missing value.
power <- power[complete.cases(power), ]
summary(power)
# Keep only the two days of interest (dates stored as d/m/Y strings).
power <- subset(power, power$Date == "1/2/2007" | power$Date == "2/2/2007")

# Combine date and time into timestamps for the x axis.
datetime <- strptime(paste(power$Date, power$Time), format = "%d/%m/%Y %H:%M:%S")

png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
plot(datetime, power$Global_active_power, type = "l", xlab = "",
     ylab = "Global Active Power")
plot(datetime, power$Voltage, type = "l", ylab = "Voltage", xlab = "datetime")
plot(datetime, power$Sub_metering_1, type = "l", col = "black", xlab = "",
     ylab = "Energy sub metering")
lines(datetime, power$Sub_metering_2, type = "l", col = "red")
lines(datetime, power$Sub_metering_3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, col = c("black", "red", "blue"), bty = "n")
plot(datetime, power$Global_reactive_power, type = "l", xlab = "datetime",
     ylab = "Global_reactive_power")
dev.off()
|
253aa1620f8558b2d8811cc76c73c5546a11c195
|
b0251a873cda6b236dc46a71c0d7ac8d403dda28
|
/man/summary.DTR.Rd
|
6db5303cb9919527e742c4c48e33444e85a3aa47
|
[] |
no_license
|
yhy188/rosur
|
161a836f477ca85d091da3974fb640b372ddddb0
|
4e8d5ddd3e4102a187173232c040d53241621636
|
refs/heads/master
| 2021-01-16T10:58:23.970230
| 2020-03-18T02:18:06
| 2020-03-18T02:18:06
| 243,092,781
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,209
|
rd
|
summary.DTR.Rd
|
\name{summary.DTR}
\alias{summary.DTR}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Summary of survival curves
}
\description{
Returns an object of class \code{summary.DTR}. See \code{DTR.object} for details.
}
\usage{
\method{summary}{DTR}(object, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{
%% ~~Describe \code{est} here~~
the result of a call to the \code{\link{LDTestimate}} function or \code{\link{WRSEestimate}} function
}
\item{\dots}{for future methods}
}
\value{
The function returns an object of class \code{summary.DTR}. \cr
}
\seealso{
\code{\link{DTR.object}}, \code{\link{print.DTR}},
\code{\link{print.summary.DTR}}, \code{\link{plot.DTR}}
}
\examples{
\dontrun{
data("LDTdata")
est <- LDTestimate(data=LDTdata)
summary(est)}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{survival analysis}
\keyword{sequentially randomized clinical trial}
\keyword{sequentially randomized design}
\keyword{treatment sequence}
\keyword{dynamic treatment regime}
\keyword{adaptive treatment strategy}
\keyword{Inverse weighting}
|
7fbcd9eb7d756c2bd53302bb24880f91aba215ed
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/eechidna/examples/aec_carto_f.Rd.R
|
5d9db3f955e4e076da54aeb164f909e51788637c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,272
|
r
|
aec_carto_f.Rd.R
|
# Example for eechidna::aec_carto_f: extract the electorates around a centre
# point (Adelaide), run the Dorling layout on their centres, and compare the
# cartogram positions with the original map.
library(eechidna)
### Name: aec_carto_f
### Title: aec_carto_f - run dorling on data centers
### Aliases: aec_carto_f
### ** Examples
library(dplyr)
library(ggplot2)
data(nat_map16)
data(nat_data16)
nat_data16 <- nat_data16 %>% select(-c(x,y)) # remove existing cartogram coordinates
# Electorates around (138.6, -34.9); `expand` widens the extraction window.
adelaide <- aec_extract_f(nat_data16, ctr=c(138.6, -34.9), expand=c(2,3))
adelaide_carto <- aec_carto_f(adelaide) %>% rename(id=region)
# Cartogram points drawn over the electorate boundary paths.
ggplot(data=nat_map16) +
  geom_path(aes(x=long, y=lat, group=group, order=order),
            colour="grey50") +
  geom_point(data=adelaide_carto, aes(x=x, y=y), size=4, alpha=0.4,
             colour="#f0027f") +
  xlim(c(136, 140)) + ylim(-36, -33) +
  coord_equal()
# Join original centres to cartogram positions to draw displacement segments.
adelaide_all <- merge(adelaide, adelaide_carto, by="id")
ggplot(data=nat_map16) +
  geom_path(aes(x=long, y=lat, group=group, order=order),
            colour="grey50") +
  geom_point(data=adelaide_all, aes(x=long_c, y=lat_c), size=2, alpha=0.4,
             colour="#f0027f") +
  geom_point(data=adelaide_all, aes(x=x, y=y), size=2, alpha=0.4,
             colour="#f0027f") +
  geom_segment(data=adelaide_all,
               aes(x=long_c, xend=x, y=lat_c, yend=y), colour="#f0027f") +
  xlim(c(136, 140)) + ylim(-37, -33) +
  coord_equal()
|
120ba728a68c160c7e87ec4b2f4175ed087cc184
|
acbecda9b931b15996b369780b6662fbafc6f0fd
|
/R/addins.R
|
2bad3b63055fe7670938caf6cfffe8bad58f17ee
|
[] |
no_license
|
dtkaplan/etude
|
c2d4f2976bf678c56376deae57d9d9ac9f202af4
|
f0fcb59af3286202c5fab494c95abde8b904a284
|
refs/heads/master
| 2022-12-26T22:21:06.140586
| 2020-10-12T21:32:36
| 2020-10-12T21:32:36
| 207,023,456
| 1
| 1
| null | 2020-06-16T17:25:50
| 2019-09-07T20:40:16
|
HTML
|
UTF-8
|
R
| false
| false
| 3,608
|
r
|
addins.R
|
#' Addin to make a new etude exercise
#'
#' Convenience entry point that forwards to `new_etude()` with
#' `learnr = TRUE`, creating a new learnr-flavored etude document.
#'
#' @export
#' @rdname new_etude_template
#' @param directory Path to the directory where the files go
new_etude_learnr <- function(directory = ".") {
  new_etude(directory, learnr = TRUE)
}
#' @export
#' @rdname new_etude_template
new_etude <- function(directory = ".",
                      learnr = FALSE) {
  # The directory must end in "/" or be one of the shorthand paths.
  if (!(grepl("/$", directory) || directory == "." || directory == ".."))
    stop("Directory name must terminate in a forward slash /.")
  # dir.exists() is a direct existence test (the old list.dirs() call was an
  # indirect and slower way to ask the same question).
  if (!dir.exists(directory))
    stop("No directory <", directory, "> in which to create the file.")
  # Keep generating candidate documents until the template yields a file
  # name not already present in `directory`.
  while (TRUE) {
    doc_contents <-
      new_etude_template(save = FALSE,
                         learnr = learnr)
    # will be saved later from editor
    new_file_name <- paste(directory, attr(doc_contents, "file_name"), sep = "/")
    # BUG FIX: the original checked `length(list.files(path = new_file_name))`,
    # but list.files() on a plain-file path returns character(0), so the
    # check never detected an existing file and prior work could be
    # overwritten. file.exists() tests the file itself.
    if (!file.exists(new_file_name)) {
      writeLines(doc_contents, con = new_file_name)
      # Open the new file in the editor when the RStudio API is available.
      if (!rstudioapi::isAvailable())
        return()
      if (!rstudioapi::hasFun("navigateToFile"))
        return()
      rstudioapi::navigateToFile(new_file_name)
      break
    }
  }
}
# Addin to insert a question
#' Insert a question chunk template into the active RStudio document
#'
#' Derives the document id and the next free chunk id for `type`, loads the
#' matching template shipped with the etude package, fills in the chunk id,
#' and inserts the result at the cursor.
#'
#' @param type Suffix selecting the kind of chunk (e.g. "-Q", "-TF"); it
#'   names the template file `template<type>.Rmd` in the installed package.
#' @export
insertQ <- function(type = "-Q") {
  this_doc <- rstudioapi::getActiveDocumentContext()
  contents <- this_doc$contents
  # figure out the document ID
  id <- get_doc_ID(contents)
  cat("Doc ID is", id, "\n")
  # Get the next question number
  chunk_id <- new_chunk_id(contents, id, type)
  cat("Chunk ID is", chunk_id, "\n")
  # Template file shipped in the installed package, one per chunk type.
  template_file <-
    system.file(glue::glue("template{type}.Rmd"),
                package = "etude")
  new_chunk <- readLines(template_file)
  # Replace the literal BLOCK_NAME placeholder with the generated chunk id.
  new_chunk <- gsub("BLOCK_NAME",
                    chunk_id,
                    new_chunk, fixed = TRUE)
  # Insert at the current cursor position of the active document.
  rstudioapi::insertText(
    paste(new_chunk, collapse="\n"),
    id = this_doc$id)
}
#' Get the id of the document
#'
#' Returns the value of the first "id:" line in `contents`, with all spaces
#' stripped; when no such line exists, a random "document_<hex>" id is
#' generated instead.
get_doc_ID <- function(contents) {
  # Random fallback id, used only if no "id:" line is found below.
  doc_id <- paste0("document_", as.hexmode(floor(stats::runif(1, 1, 16^7))))
  id_lines <- which(grepl("^id:", contents))
  if (length(id_lines) > 0) {
    first_id_line <- contents[id_lines[1]]
    # Strip the leading "id:" (plus spaces), then remove remaining spaces.
    doc_id <- gsub(" +", "",
                   gsub("^(id:| )+(.*)$", "\\2", first_id_line))
  }
  doc_id
}
# Replace the primary selection of the active RStudio document with the
# cleaned-up text produced by do_clean_acroscore() (defined elsewhere in
# this package).
clean_acroscore <- function() {
  context <- rstudioapi::getActiveDocumentContext()
  where <- rstudioapi::primary_selection(context)
  rstudioapi::insertText(where$range,
                         do_clean_acroscore(where$text),
                         context$id)
}
# Build the next chunk id "<doc_id><type><n>" by scanning `contents` for
# existing chunk headers of the form ```{r <doc_id><type><number>, ...} and
# taking max(number) + 1 (or 1 when none exist).
new_chunk_id <- function(contents, doc_id, type = "-Q") {
  line_nums <-
    grep(
      paste0("^```\\{r +",doc_id,
             glue::glue("{type}[0-9]+[, \\}]")),
      contents)
  if (length(line_nums) > 0) {
    # Extract the numeric suffixes of the matching chunk labels.
    # NOTE(review): glue interpolates {type} before the string is used as a
    # regex; the interplay of glue braces and regex escapes here looks
    # fragile — verify against the glue documentation.
    nums <- regmatches(contents[line_nums],
                       gregexpr(glue::glue("\\{type}([0-9]+)"),
                                contents[line_nums]))
    nums <- unlist(nums)
    # Keep only the digits of each match before converting.
    nums <- as.numeric(gsub("[^0-9]", "", nums))
    new_num <- max(nums)+1
  } else {
    new_num <- 1
  }
  # form the new chunk ID and return
  paste0(doc_id, type, new_num)
}
# One-line addin entry points: each inserts one specific chunk kind by
# delegating to etude::insertQ() with the matching type suffix.
#' @export
etudeE <- function() etude::insertQ("-E")
#' @export
etudeS <- function() etude::insertQ("-sandbox")
#' @export
etudeQ <- function() etude::insertQ("-Q")
#' @export
etudeC <- function() etude::insertQ("-C") # Chunk
#' @export
etudeQA <- function() etude::insertQ("-QA")
#' @export
etudeTF <- function() etude::insertQ("-TF")
#' @export
etudeEssay <- function() etude::insertQ("-Essay")
#' @export
etudeQinline <- function() etude::insertQ("-Qinline")
|
4b2d7f9915d4996eaeb5a17c7e3b436e66ab08f9
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/REAT/examples/betaconv.ols.Rd.R
|
2bbdbd2a5006e1b32dd690452520ad93caacfe61
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,745
|
r
|
betaconv.ols.Rd.R
|
library(REAT)
### Name: betaconv.ols
### Title: Analysis of regional beta convergence using OLS regression
### Aliases: betaconv.ols
### ** Examples
data (G.counties.gdp)
betaconv.ols (G.counties.gdp$gdppc2010, 2010, G.counties.gdp$gdppc2011, 2011,
conditions = NULL, print.results = TRUE)
# Two years, no conditions (Absolute beta convergence)
regionaldummies <- to.dummy(G.counties.gdp$regional)
# Creating dummy variables for West/East
G.counties.gdp$West <- regionaldummies[,2]
G.counties.gdp$East <- regionaldummies[,1]
# Adding dummy variables to data
betaconv.ols (G.counties.gdp$gdppc2010, 2010, G.counties.gdp$gdppc2011, 2011,
conditions = G.counties.gdp[c(70,71)], print.results = TRUE)
# Two years, with condition (dummy for West/East)
# (Absolute and conditional beta convergence)
betaconverg1 <- betaconv.ols (G.counties.gdp$gdppc2010, 2010, G.counties.gdp$gdppc2011, 2011,
conditions = G.counties.gdp[c(70,71)], print.results = TRUE)
# Store results in object
betaconverg1$cbeta$estimates
# Addressing estimates for the conditional beta model
betaconv.ols (G.counties.gdp$gdppc2010, 2010, G.counties.gdp[65:66], 2012,
conditions = NULL, print.results = TRUE)
# Three years (2010-2012), no conditions (Absolute beta convergence)
betaconv.ols (G.counties.gdp$gdppc2010, 2010, G.counties.gdp[65:66], 2012,
conditions = G.counties.gdp[c(70,71)], print.results = TRUE)
# Three years (2010-2012), with conditions (Absolute and conditional beta convergence)
betaconverg2 <- betaconv.ols (G.counties.gdp$gdppc2010, 2010, G.counties.gdp[65:66],
2012, conditions = G.counties.gdp[c(70,71)], print.results = TRUE)
# Store results in object
betaconverg2$cbeta$estimates
# Addressing estimates for the conditional beta model
|
4312c316f9dc95b2db197acfae427d10925f994e
|
854cf62a5df3c60b0b3e9521606366cf0bc3c010
|
/load-csv-data.R
|
d720279bbfc123f786700e006a0baf00ac5c71f3
|
[] |
no_license
|
mcwachanga/Intro-to-r
|
b3471e2477749d0f3fc81e147aaf404d728403a6
|
ae9f34e4428da974d9deca84e07c1912e85f74d7
|
refs/heads/master
| 2020-03-27T04:52:29.495724
| 2018-08-24T10:50:40
| 2018-08-24T10:50:40
| 145,975,682
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 158
|
r
|
load-csv-data.R
|
# Load a CSV file into a data frame and print a quick overview.
df <- read.csv("data/2013.csv", header = TRUE)
# Preview the first rows (head() shows six by default).
head(df)
# Per-column summary statistics.
summary(df)
|
a08ec64a9eb9c52c8ca48a19fdf5f324e3360fc6
|
446373433355171cdb65266ac3b24d03e884bb5d
|
/R/saga_metricconversions.R
|
fc71718cd4887c9b0e187bd9d51835ad5e1de853
|
[
"MIT"
] |
permissive
|
VB6Hobbyst7/r_package_qgis
|
233a49cbdb590ebc5b38d197cd38441888c8a6f3
|
8a5130ad98c4405085a09913b535a94b4a2a4fc3
|
refs/heads/master
| 2023-06-27T11:52:21.538634
| 2021-08-01T01:05:01
| 2021-08-01T01:05:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,480
|
r
|
saga_metricconversions.R
|
##' QGIS Algorithm provided by SAGA Metric conversions (saga:metricconversions)
##'
##' @title QGIS algorithm Metric conversions
##'
##' @param GRID `raster` - Grid. Path to a raster layer.
##' @param CONVERSION `enum` of `("[0] radians to degree", "[1] degree to radians", "[2] Celsius to Fahrenheit", "[3] Fahrenheit to Celsius")` - Conversion. Number of selected option, e.g. '1'. Comma separated list of options, e.g. '1,3'.
##' @param CONV `rasterDestination` - Converted Grid. Path for new raster layer.
##' @param ... further parameters passed to `qgisprocess::qgis_run_algorithm()`
##' @param .complete_output logical specifying whether the complete output of `qgisprocess::qgis_run_algorithm()` should be returned (`TRUE`) or only the first (most likely the main) output (`FALSE`). Default value is `TRUE`.
##'
##' @details
##' ## Outputs description
##' * CONV - outputRaster - Converted Grid
##'
##'
##' @export
##' @md
##' @importFrom qgisprocess qgis_run_algorithm qgis_default_value
saga_metricconversions <- function(GRID = qgisprocess::qgis_default_value(), CONVERSION = qgisprocess::qgis_default_value(), CONV = qgisprocess::qgis_default_value(),..., .complete_output = TRUE) {
  # Verify QGIS and the SAGA provider expose this algorithm before calling.
  check_algorithm_necessities("saga:metricconversions")
  result <- qgisprocess::qgis_run_algorithm("saga:metricconversions",
                                            `GRID` = GRID,
                                            `CONVERSION` = CONVERSION,
                                            `CONV` = CONV, ...)
  if (!.complete_output) {
    # Hand back only the converted raster output.
    return(qgisprocess::qgis_output(result, "CONV"))
  }
  result
}
|
64830409e6bdebc6644cdd72e4d47c4e3e3105b5
|
6cf7b035125b9ab0a4ee92301263b06cae46e352
|
/Scripts/dplyr.R
|
d6b8dd298d78b6f2808211d75f09e21d450d60aa
|
[] |
no_license
|
abelgGit/2017_06_06_R_tidyverse
|
98400cbaf70f5b582262d7a52106ecd68c5ab6ed
|
6a3eeb2ab713be3c5f7239193f59ced8efaea33f
|
refs/heads/master
| 2021-01-25T04:49:43.440849
| 2017-06-07T13:00:53
| 2017-06-07T13:00:53
| 93,488,482
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,132
|
r
|
dplyr.R
|
# Tidyverse/dplyr practice script: explore the gapminder data set with the
# core verbs and draw summary plots, ending with a world map of mean life
# expectancy per country.
library(tidyverse)

gapminder <- read_csv("Data/gapminder-FiveYearData.csv")

# Two equivalent calls illustrating the pipe operator.
rep("This is an example", times = 3)
"This is an example" %>% rep(times = 3)

# select(): keep a subset of columns.
year_country_gdp <- select(gapminder, year, country, gdpPercap)
head(year_country_gdp)

# The same selection written with a pipe.
year_country_gdp <- gapminder %>%
  select(year, country, gdpPercap)
head(year_country_gdp)

# Population by continent in 2002.
gapminder %>%
  filter(year == 2002) %>%
  ggplot(mapping = aes(x = continent, y = pop)) +
  geom_boxplot()

# filter() + select(): European rows only.
year_country_gdp_euro <- gapminder %>%
  filter(continent == "Europe") %>%
  select(year, country, gdpPercap)
year_country_gdp

country_lifeExp_Norway <- gapminder %>%
  filter(country == "Norway") %>%
  select(year, lifeExp, gdpPercap)
country_lifeExp_Norway

# group_by() + summarize(): mean GDP per capita by continent.
gapminder %>%
  group_by(continent) %>%
  summarize(mean_gdpPercap = mean(gdpPercap))

gapminder %>%
  group_by(continent) %>%
  summarize(mean_gdpPercap = mean(gdpPercap)) %>%
  ggplot(mapping = aes(x = continent, y = mean_gdpPercap)) +
  geom_bar(stat = "identity")

# Mean life expectancy per Asian country.
# BUG FIX: the original chain ended in "+ order()", which is not a ggplot
# layer and made the whole pipeline error; it has been removed.
gapminder %>%
  filter(continent == "Asia") %>%
  group_by(country) %>%
  summarize(mean_lifeExp = mean(lifeExp)) %>%
  ggplot(mapping = aes(y = country, x = mean_lifeExp)) +
  geom_point()

# mutate(): derive total GDP in billions.
gapminder %>%
  mutate(gdp_billion = gdpPercap * pop / 10^9) %>%
  head()

# BUG FIX: the original summarize() call was missing its two closing
# parentheses, which made the script unparseable from this point on.
gapminder %>%
  mutate(gdp_billion = gdpPercap * pop / 10^9) %>%
  group_by(continent, year) %>%
  summarize(mean_gdpPercap = mean(gdp_billion))

gapminder %>%
  mutate(gdp_billion = gdpPercap * pop / 10^9) %>%
  group_by(continent, year) %>%
  summarize(mean_gdpPercap = mean(gdpPercap),
            sd_gdpPercap = sd(gdpPercap),
            mean_pop = mean(pop),
            sd_pop = sd(pop),
            mean_gdp_billion = mean(gdp_billion),
            sd_gdp_billion = sd(gdp_billion))

# Mean life expectancy per country, joined onto world map polygons.
gapminder_country_summary <- gapminder %>%
  group_by(country) %>%
  summarize(mean_lifeExp = mean(lifeExp))

library(maps)
map_data("world") %>%
  rename(country = region) %>%
  left_join(gapminder_country_summary, by = "country") %>%
  ggplot() +
  geom_polygon(aes(x = long, y = lat, group = group, fill = mean_lifeExp)) +
  scale_fill_gradient(low = "blue", high = "Red") +
  coord_equal()

ggsave("My_worldmap.png")
|
06e207e3dd28c1b2d5b4e2bc409b99c7ae5566ea
|
b09fe02978b3ee250813d135a6767006d468c166
|
/man/remove_bigram_stopwords.Rd
|
1857138908a93184ae5dcad314b4c1ac3fb08bc2
|
[] |
no_license
|
scottfrechette/funcyfrech
|
098f3794b83fe7cd8871f91125eac2a5861255cb
|
0d3aa005b81d91807f1ba2700d80d15f4f9cc04c
|
refs/heads/master
| 2022-08-29T15:59:27.927431
| 2022-08-26T02:31:43
| 2022-08-26T02:31:43
| 213,962,999
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 439
|
rd
|
remove_bigram_stopwords.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/remove_bigram_stopwords.R
\name{remove_bigram_stopwords}
\alias{remove_bigram_stopwords}
\title{Remove stop words from bigrams}
\usage{
remove_bigram_stopwords(df, bigrams, char_only = TRUE)
}
\arguments{
\item{df}{A tibble containing bigrams}
\item{char_only}{Remove numbers}
\item{bigrams}{Column of bigrams}
}
\description{
Remove stop words from bigrams
}
|
4c2fa9c5e554d6cc3c67403b49ed9135f752d823
|
968c8f8ca03319c455303f0c46346021a4b203e2
|
/man/model_summary_table.Rd
|
0650893116fd0e2a26dd0041fa0c18c37db08590
|
[] |
no_license
|
rnaimehaom/AutoModel
|
2d4ed1d9cb240ed87ed64c83b63765117d003653
|
b78615d51ef101758cdfcd1bdcce2c7edb02d7e4
|
refs/heads/master
| 2023-03-18T06:10:16.550971
| 2015-08-12T21:29:38
| 2015-08-12T21:29:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 886
|
rd
|
model_summary_table.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/runmodel.R
\name{model_summary_table}
\alias{model_summary_table}
\title{Hierarchical regression: model summary output}
\usage{
model_summary_table(models, formulas)
}
\arguments{
\item{models}{A list of \code{lm} model objects. A set of model objects
created by \code{create_model_object}.}
\item{formulas}{Formula list produced by \code{create_formula_objects}.}
}
\description{
Hierarchical regression: model summary output
}
\details{
Creates table output to summarize model statistics for all models in
a hierarchical regression analysis.
}
\examples{
freeny_model_formulas <- create_formula_objects("y",
c("lag.quarterly.revenue"), c("price.index"))
freeny_models <- create_model_objects(freeny_model_formulas,
dataset = freeny)
model_summary_table(freeny_models, freeny_model_formulas)
}
|
a08ac8e710d93f52aaebb67261be1efa314b02d3
|
efcd73d82923061a933661202415da88f6f0975a
|
/man/RandomARMod_nlin1.Rd
|
2044c965cf953ff9f3c45f518f044caef4b9b071
|
[] |
no_license
|
SimoneHermann/rexpar
|
624b0d30bd3fde12a5e908bd90057dc6d260459a
|
3883b9f8aa1685c28979c621d427ae3080a1cd8e
|
refs/heads/master
| 2021-01-21T15:33:59.129522
| 2015-06-22T12:03:59
| 2015-06-22T12:03:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,018
|
rd
|
RandomARMod_nlin1.Rd
|
\name{RandomARMod_nlin1}
\alias{RandomARMod_nlin1}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Random non-linear AR(1) Series
}
\description{
The function generates a random non-linear AR(1) process with given power, autoregression and starting
value. Furthermore, the errors can be specified by 4 predefined examples. The main equation
is given by \deqn{y_n = y_{n-1} + \theta_1 y_{n-1}^{\theta_3} + e_n,} with \eqn{y_0} fixed.
}
\usage{
RandomARMod_nlin1(nobs,arp,power,start,cont)
}
\arguments{
\item{nobs}{
number of observations for the process to generate
}
\item{arp}{
autoregressive parameter theta_1
}
\item{power}{
power parameter theta_3
}
\item{start}{
starting value of the process y_0
}
\item{cont}{
error distribution defined by value in (0,1,2,3,4).
0 : e_n is i.i.d. N(0,0.1) distributed
1 : e_n is i.i.d. N(0,0.1) distributed, but in Pois(5/100) drawn points in time N(5,1) errors are added
2 : e_n is i.i.d. Gumbel distributed with parameters 10 and -3.3661513
3 : e_n is i.i.d. Frechet distributed with parameters 1.928, -2, 10
4 : e_n is i.i.d. Gamma distributed with parameters 1 and 1. Further the errors are centered by -0.6932 to satisfy a med(E_n)=0 condition
}
}
\details{
All error distributions are chosen to satisfy med(E_n)=0.
}
\value{
the function returns a vector (y_0,...,y_N) which is a simulation of the AR process given by the input parameters
}
\references{
Kustosz, C. (2014). Implementation of simplicial depth for AR processes under non standard conditions. Working Paper, TU Dortmund.
}
\author{
Kustosz, Christoph
}
%\note{
%
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{dS1_lin2}}, \code{\link{dS2_lin2}}, \code{\link{dS3_lin2}}, \code{\link{dS_lin2}}
}
\examples{
y<-RandomARMod_nlin1(300,0.005,1.002,15,0)
plot(y,type="l")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
9e258ac21e33ad548f980bbf33200fa6c5cc4c90
|
32f74e60d35002d7c148cdfd5e3353b90c82ce5c
|
/man/plot_biv_olr.Rd
|
4c038c0b19ac8a4036c6182dda419a0c89dd81a5
|
[] |
no_license
|
agdamsbo/daDoctoR
|
152adf49feb5d329e20c19d0afdd708d74de68cc
|
a0c4b3f9c3cf7a172fb919116fe41df4a70236e8
|
refs/heads/master
| 2022-09-25T15:12:39.699092
| 2022-08-26T13:07:49
| 2022-08-26T13:07:49
| 151,308,209
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,347
|
rd
|
plot_biv_olr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_biv_olr.R
\name{plot_biv_olr}
\alias{plot_biv_olr}
\title{Forest plot from ordinal logistic regression, version 2 of plot_ord_ords().}
\usage{
plot_biv_olr(
meas,
vars,
data,
title = NULL,
dec = 3,
lbls = NULL,
hori = "OR (95 \% CI)",
vert = "Variables",
short = FALSE,
analysis = c("biv", "multi")
)
}
\arguments{
\item{meas}{outcome meassure variable name or response in data-data.frame as a string. Should be factor, preferably ordered.}
\item{vars}{variables to compare against. As vector of columnnames.}
\item{data}{dataframe of data.}
\item{title}{plot title}
\item{dec}{decimals for labels}
\item{lbls}{labels for variable names. Careful, as the right order is not checked automatically!}
\item{hori}{labels the horizontal axis (this is the y axis, as the plot is rotated)}
\item{vert}{labels the vertical axis (this is the x axis, as the plot is rotated)}
\item{short}{flag to half number of ticks on horizontal axis.}
\item{analysis}{can be either "biv", or "multi", for creation of forest plot from either bivariate (unadjusted) or multivariate (adjusted) ordinal logistic regression.}
}
\description{
Heavily inspired by https://www.r-bloggers.com/plotting-odds-ratios-aka-a-forrestplot-with-ggplot2/
}
\keyword{forestplot}
|
7e14b390b0bef5cb7f240ed858d2b99d6de94e64
|
4ac86a3396861bcd055018b5f192487854c13de8
|
/R/channelMorphology.r
|
e2fc200264e32dd2d85aeb16a82a835d59b6699c
|
[] |
no_license
|
mengeln/phabMetrics
|
cf0510ba24249b88ad445b88f7cd4a94eb898f18
|
d373790bbbcdabaac65a8c0beb864913c144d488
|
refs/heads/master
| 2021-01-01T05:46:52.454248
| 2014-03-19T21:52:20
| 2014-03-19T21:52:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,794
|
r
|
channelMorphology.r
|
# Compute channel-morphology metrics per SampleID from a long-format PHAB
# table `datum` (columns used: AnalyteName, ResQualCode, LocationCode,
# SampleID, Result). Relies on metricCalc() and ddply() defined/imported
# elsewhere in this package; `diversity` is presumably vegan::diversity —
# TODO confirm.
channelMorphology <- function(datum){
  # Flow-habitat records with an exact ("=") result qualifier.
  data <- subset(datum, AnalyteName %in% c("Cascade/Falls",
                                           "Dry",
                                           "Glide",
                                           "Pool",
                                           "Rapid",
                                           "Riffle",
                                           "Run"
  ) &
    ResQualCode == "=")
  if(nrow(data) == 0)return(data.frame("SampleID"=NULL,
                                       "metric"=NULL,
                                       "mean"=NULL,
                                       "sd"=NULL,
                                       "count"=NULL))
  data$Location2 <- as.character(data$LocationCode)
  data$Result <- as.numeric(data$Result)
  # Per-sample metric closures evaluated by metricCalc(); Shannon_Flow pulls
  # the `diverse` value computed in the metricCalc() expression below via a
  # parent.frame(6) hack — NOTE(review): this depends on metricCalc()'s exact
  # call depth and is very fragile.
  metrics <- c(Shannon_Flow = function(x)parent.frame(6)$diverse,
               PCT_CF = function(x)sum(x$Result[x$AnalyteName == 'Cascade/Falls']),
               PCT_DR = function(x)sum(x$Result[x$AnalyteName == 'Dry']),
               PCT_GL = function(x)x$Result[x$AnalyteName == 'Glide'],
               PCT_POOL = function(x)sum(x$Result[x$AnalyteName == 'Pool']),
               PCT_RA = function(x)sum(x$Result[x$AnalyteName == 'Rapid']),
               PCT_RI = function(x)sum(x$Result[x$AnalyteName == 'Riffle']),
               PCT_RN = function(x)sum(x$Result[x$AnalyteName == 'Run']),
               PCT_FAST = function(x)sum(x$Result[x$AnalyteName %in% c('Cascade/Falls', 'Rapid', 'Riffle', 'Run')]),
               PCT_SLOW = function(x)sum(x$Result[x$AnalyteName %in% c('Pool', 'Glide')]),
               PCT_CF_WT = function(x)sum(x$Result[x$AnalyteName == 'Cascade/Falls']) * x$wt[1],
               PCT_GL_WT = function(x)sum(x$Result[x$AnalyteName == 'Glide']) * x$wt[1],
               PCT_POOL_WT = function(x)sum(x$Result[x$AnalyteName == 'Pool']) * x$wt[1],
               PCT_RA_WT = function(x)sum(x$Result[x$AnalyteName == 'Rapid']) * x$wt[1],
               PCT_RI_WT = function(x)sum(x$Result[x$AnalyteName == 'Riffle']) * x$wt[1],
               PCT_RN_WT = function(x)sum(x$Result[x$AnalyteName == 'Run']) * x$wt[1],
               PCT_FAST_WT = function(x)sum(x$Result[x$AnalyteName %in% c('Cascade/Falls', 'Rapid', 'Riffle', 'Run')]) * x$wt[1],
               PCT_SLOW_WT = function(x)sum(x$Result[x$AnalyteName %in% c('Pool', 'Glide')])* x$wt[1]
  )
  # The two strings are evaluated inside metricCalc(): the first computes a
  # per-sample weight `wt`, the second a Shannon diversity `diverse`.
  channelMetrics <- metricCalc("d$wt <- sum(d$Result[d$AnalyteName %in% c('Cascade/Falls', 'Rapid', 'Riffle', 'Run', 'Glide', 'Pool')])/100",
                               "diverse <- diversity(l[l$AnalyteName != 'Dry', 'Result'])")
  result <- channelMetrics(data, metrics)
  # One count per (sample, metric) pair: locations per sample divided by 7.
  result$count <- rep(tapply(data$Location2, data$SampleID, length)/7, each=length(metrics))
  # Station water depth metrics (XWDEPTH, XWDM).
  depth <- subset(datum, AnalyteName == "StationWaterDepth")
  if(nrow(depth) == 0)
    depth_result <- data.frame("SampleID"=NULL,
                               "metric"=NULL,
                               "mean"=NULL,
                               "sd"=NULL,
                               "count"=NULL)
  else {
    depth$Result <- as.numeric(depth$Result)
    # Location2 = text before the first comma of LocationCode.
    depth$Location2 <- sapply(strsplit(as.character(depth$LocationCode), ","), function(x)x[1])
    depth_result <- metricCalc(NULL)(depth, c(XWDEPTH = function(x)sum(x$Result),
                                              XWDM = function(x)max(x$Result)))
  }
  # Wetted-width metrics plus the derived width/depth ratio and area.
  width <- subset(datum, AnalyteName == "Wetted Width" & LocationCode != "X")
  if(nrow(width) == 0)
    width_result <- data.frame("SampleID"=NULL,
                               "metric"=NULL,
                               "mean"=NULL,
                               "sd"=NULL,
                               "count"=NULL)
  else {
    width$Result <- as.numeric(width$Result)
    width$Location2 <- sapply(strsplit(as.character(width$LocationCode), ","), function(x)x[1])
    width_result <- metricCalc(NULL)(width, c(XWIDTH = function(x)sum(x$Result)))
    # Pair each sample's width with its XWDEPTH to derive XWDR and XWDA.
    widthdepth <- merge(width_result, depth_result[depth_result$metric == "XWDEPTH", ], by="SampleID")
    names(widthdepth) <- c("SampleID", "metric.x", "width", "sd.x", "count.x", "metric.y",
                           "depth", "sd.y", "count.y")
    XWDR <- data.frame(cbind(widthdepth$SampleID, rep("XWDR", nrow(widthdepth)),
                             widthdepth$width / widthdepth$depth,
                             rep(NA, nrow(widthdepth)), rep(NA, nrow(widthdepth))),
                       stringsAsFactors=FALSE)
    names(XWDR) <- c("SampleID", "metric", "mean", "sd", "count")
    XWDA <- data.frame(cbind(widthdepth$SampleID, rep("XWDA", nrow(widthdepth)),
                             widthdepth$width * (widthdepth$depth/100),
                             rep(NA, nrow(widthdepth)), rep(NA, nrow(widthdepth))),
                       stringsAsFactors=FALSE)
    names(XWDA) <- c("SampleID", "metric", "mean", "sd", "count")
    width_result <- rbind(XWDA, XWDR, width_result)
  }
  # Velocity metrics per sample: mean (XWV), max (MXWV), and the proportion
  # of zero readings (PWVZ).
  velocity <- subset(datum, AnalyteName == "Velocity" & LocationCode == "X")
  if(nrow(velocity) == 0)
    velocity_result <- data.frame("SampleID"=NULL,
                                  "metric"=NULL,
                                  "mean"=NULL,
                                  "sd"=NULL,
                                  "count"=NULL)
  else {
    velocity$Result <- as.numeric(velocity$Result)
    velocity_result <- ddply(velocity, "SampleID", function(df){
      data.frame("SampleID" = rep(unique(df$SampleID), 3),
                 "metric" = c("XWV", "MXWV", "PWVZ"),
                 "mean" = c(mean(df$Result), max(df$Result), sum(df$Result == 0)/nrow(df)),
                 "sd" = c(sd(df$Result), NA, NA),
                 "count" = rep(nrow(df), 3))
    })
  }
  # Long-format result: one row per (SampleID, metric).
  rbind(result, depth_result, width_result, velocity_result)
}
|
d85bb8ec3c6b69c965a3b33e51a50391a25192e5
|
5b097ec0429f848d48bfd60c299188316da9700a
|
/R/data.R
|
de1219f66e23ba90c5a24d82211d75b7bf77a89d
|
[] |
no_license
|
beckymaust/kwmatch
|
2fbe6c1c567af1f4fec1635bd4719c65de7e9036
|
537b926fa0bc6527b699f5306e14dfc4989b0bdc
|
refs/heads/master
| 2021-01-02T22:58:49.719101
| 2015-03-28T18:28:59
| 2015-03-28T18:28:59
| 33,041,889
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 439
|
r
|
data.R
|
#' Titles and abstracts of articles from the Open Journal of Statistics
#'
#' A dataset containing the titles and the first sentence of the abstract
#' for 10 different articles in the Open Journal of Statistics.
#'
#' @format A data frame with 10 rows and 2 variables:
#' \describe{
#'   \item{Title}{Title of article}
#'   \item{Abstract}{The first sentence of the abstract}
#' }
#' @source \url{http://www.scirp.org/journal/ojs/}
"journal"
|
d7370fa760de64866cdbbdafcf43b406788c8fde
|
95c0ed78f00bc1cb13d4e3611ff1722dd019631b
|
/src/plot_HJ_coding.R
|
31f49932ab1b3c1899f6d593e4e6a5a3f037d50f
|
[] |
no_license
|
rosemm/context_word_seg
|
bd40e2a02cbf62d4a96ef796be8e9e81cbded63e
|
0f145ae90dfc8eba5fcf2beeb3034d65c106f784
|
refs/heads/master
| 2020-05-22T01:46:32.125165
| 2016-12-20T18:04:55
| 2016-12-20T18:04:55
| 42,607,106
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,859
|
r
|
plot_HJ_coding.R
|
## Plots summarising human-judge (HJ) context coding.
##
## Assumes `df_HJ_raw` (per-utterance context votes) and `df_HJ_bin`
## (binarised context assignments) already exist in the workspace,
## presumably created by an upstream script -- TODO confirm.
## Each figure is a descending bar chart of per-context counts, with
## bars above the median highlighted in green, saved to graphs/HJ/
## on both a raw and a log10 y scale.
##
## NOTE(review): ggsave() here relies on the most recently *displayed*
## plot, so the top-level ggplot expressions must auto-print (e.g. run
## interactively or via Rscript, not source() with echo = FALSE).

## Drop identifier/transcription columns, keeping one vote column per context.
votes <- select(df_HJ_raw, -utt, -orth, -phon)
##########################################
# number of utterances per context
##########################################
## Total votes per context, sorted so the bars appear in descending order.
counts <- sort(colSums(votes, na.rm = TRUE), decreasing = TRUE)
count.data <- data.frame(n.utts=counts,
                         context=factor(names(counts), levels=names(counts)))
# over the median number of utterances?
count.data$over.med <- ifelse(count.data$n.utts > median(counts), "yes", "no")
## Votes per context, raw scale.
ggplot(count.data, aes(x=context, y=n.utts, fill=over.med)) +
  geom_bar(stat = "identity", show.legend = F) +
  geom_text(aes(label=n.utts), position=position_dodge(width=0.9), vjust=-0.25)+
  scale_fill_manual(values = c("yes" = "darkgreen","no" = "grey")) +
  labs(x=NULL, y="Number of votes for each context") +
  theme(axis.text.x = element_text(angle=330, vjust=1, hjust=0))
ggsave("graphs/HJ/votesper_context.png", width=16, height=5, units="in")
## Same counts on a log10 y axis.
ggplot(count.data, aes(x=context, y=n.utts, fill=over.med)) +
  geom_bar(stat = "identity", show.legend = F) +
  geom_text(aes(label=n.utts), position=position_dodge(width=0.9), vjust=-0.25)+
  scale_fill_manual(values = c("yes" = "darkgreen","no" = "grey")) +
  labs(x=NULL, y="log(Number of votes)") +
  scale_y_log10() +
  theme(axis.text.x = element_text(angle=330, vjust=1, hjust=0))
ggsave("graphs/HJ/votesper_context_logtrans.png", width=16, height=5, units="in")
################################################
# utterances actually assigned to each context
################################################
## Repeat, but counting the binarised assignments rather than raw votes.
utterances <- select(df_HJ_bin, -utt, -orth, -phon)
counts <- sort(colSums(utterances, na.rm = TRUE), decreasing = TRUE)
count.data <- data.frame(n.utts=counts,
                         context=factor(names(counts), levels=names(counts)))
# over the median number of utterances?
count.data$over.med <- ifelse(count.data$n.utts > median(counts), "yes", "no")
## Assigned utterances per context, raw scale.
ggplot(count.data, aes(x=context, y=n.utts, fill=over.med)) +
  geom_bar(stat = "identity", show.legend = F) +
  geom_text(aes(label=n.utts), position=position_dodge(width=0.9), vjust=-0.25)+
  scale_fill_manual(values = c("yes" = "darkgreen","no" = "grey")) +
  labs(x=NULL, y="Number of utterances assigned to each context") +
  theme(axis.text.x = element_text(angle=330, vjust=1, hjust=0))
ggsave("graphs/HJ/uttsper_context.png", width=16, height=5, units="in")
## Same assignments on a log10 y axis.
ggplot(count.data, aes(x=context, y=n.utts, fill=over.med)) +
  geom_bar(stat = "identity", show.legend = F) +
  geom_text(aes(label=n.utts), position=position_dodge(width=0.9), vjust=-0.25)+
  scale_fill_manual(values = c("yes" = "darkgreen","no" = "grey")) +
  labs(x=NULL, y="log(Number of utterances assigned)") +
  scale_y_log10() +
  theme(axis.text.x = element_text(angle=330, vjust=1, hjust=0))
ggsave("graphs/HJ/uttsper_context_logtrans.png", width=16, height=5, units="in")
|
45987faaa257c99b27f04a4514f087c91ce2e26b
|
72b9ec2d3e7c59bbe45e4d9cab824592fc82609b
|
/R/boot.r
|
792d186bc8c381d27ceeeeda9f3d7859c901cff1
|
[] |
no_license
|
b-steve/ascr
|
a6f9acd90e7ce03f81333b9572e233a482c074b5
|
80b8b1d3e48b26a2b452ca9c40fddd91d1d7b3ee
|
refs/heads/master
| 2023-05-04T20:41:36.748148
| 2022-08-11T21:56:23
| 2022-08-11T21:56:25
| 68,281,550
| 8
| 8
| null | 2023-03-13T17:06:04
| 2016-09-15T09:26:18
|
R
|
UTF-8
|
R
| false
| false
| 14,050
|
r
|
boot.r
|
#' Bootstrapping a fitted ascr model
#'
#' Carries out a parametric bootstrap, based on a model fitted using
#' \link{fit.ascr}.
#'
#' For each bootstrap resample, a new population of individuals is
#' simulated within the mask area. Detections of these individuals are
#' simulated using the estimated detection function. For detected
#' individuals, additional information is simulated from the estimated
#' distribution of measurement error. The original model is then
#' re-fitted to these simulated data, and parameter estimates for each
#' iteration saved in the component \code{boot} of the returned list.
#'
#' Note that generic functions \link{stdEr} and \link{vcov} with an
#' object of class \code{ascr.boot} as the main argument will
#' return standard errors and the variance-covariance matrix for
#' estimated parameters \emph{based on the bootstrap procedure} (via
#' the \link{stdEr.ascr.boot} and \link{vcov.ascr.boot}
#' methods). For standard errors and the variance-covariance matrix
#' based on maximum likelihood asymptotic theory, the methods
#' \link{stdEr.ascr} and \link{vcov.ascr} must be called
#' directly.
#'
#' If \code{infotypes} is provided it should take the form of a list,
#' where each component is a subset of information types (i.e.,
#' \code{fit$infotypes}) used to fit the original model. A \code{NULL}
#' component is associated with no additional information. The
#' bootstrap procedure is then repeated for each component, only
#' utilising the appropriate additional information. In practice this
#' is only useful if the user is looking to investigate the benefits
#' of including particular information types. The results from these
#' extra bootstrap procedures can be found in the
#' \code{boot$extra.boots} component of the returned object.
#'
#' @section Bootstrapping for acoustic surveys:
#'
#' For fits based on acoustic surveys where the argument
#' \code{cue.rates} is provided to the \code{fit.ascr} function, the
#' simulated data allocates multiple calls to the same location based
#' on an estimated distribution of the call frequencies. Using a
#' parametric bootstrap is currently the only way parameter
#' uncertainty can be estimated for such models. See Stevenson et
#' al. (2015) for details.
#'
#' @section Monte Carlo error:
#'
#' There will be some error in estimates based on the parametric
#' bootstrap (e.g., standard errors and estimates of bias) because the
#' number of bootstrap simulations is not infinite. By default, this
#' function calculates Monte Carlo error using a bootstrap of the
#' estimates obtained from the initial bootstrap procedure; see
#' Equation (9) in Koehler, Brown and Haneuse (2009). Note that this
#' secondary bootstrap does not require the fitting of any further
#' models, and so the increased processing time due to this procedure
#' is negligible.
#'
#' Monte Carlo error for standard errors and estimates of bias can be
#' extracted using the function \link{get.mce}.
#'
#' @references Koehler, E., Brown, E., and Haneuse, S. J.-P. A. (2009)
#'     On the assessment of Monte Carlo error in simulation-based
#' statistical analyses. \emph{The American Statistician},
#' \strong{63}: 155--162.
#'
#' @references Stevenson, B. C., Borchers, D. L., Altwegg, R., Swift,
#' R. J., and Gillespie, D. M., and Measey, G. J. (2015) A general
#' framework for animal density estimation from acoustic
#' detections across a fixed microphone array. \emph{Methods in
#' Ecology and Evolution}, \strong{6}(1): 38--48.
#'
#' @return A list of class \code{"ascr.boot"}. Components contain
#' information such as estimated parameters and standard
#' errors. The best way to access such information, however, is
#' through the variety of helper functions provided by the
#' ascr package. S3 methods \link{stdEr.ascr.boot} and
#' \link{vcov.ascr.boot} can be used to return standard errors
#' and the variance-covariance matrix of estimated parameters
#' based on the bootstrap procedure.
#'
#' @param fit A fitted \code{ascr} model object.
#' @param N The number of bootstrap resamples.
#' @param prog Logical, if \code{TRUE}, a progress bar is shown. Only
#' available if \code{n.cores} is 1.
#' @param n.cores A positive integer representing the number of cores
#' to use for parallel processing.
#' @param M The number of bootstrap resamples for the secondary
#' bootstrap used to calculate Monte Carlo error. See 'Details'
#' below. If M = 0, then this is skipped.
#' @param infotypes A list, where each component contains information
#' types for subsequent bootstrap procedures. See 'Details'.
#'
#'
#'
#' @examples
#' \dontrun{
#' ## In practice, N should be >> 100, but this leads to long computation time for a simple example.
#' boot.fit <- boot.ascr(fit = example.data$fits$simple.hn, N = 100)
#' }
#'
#' @export
boot.ascr <- function(fit, N, prog = TRUE, n.cores = 1, M = 10000, infotypes = NULL){
    args <- fit$args
    orig.sv <- args$sv
    ## Set start values to estimated parameters.
    args$sv <- get.par(fit, "fitted", as.list = TRUE)
    ## Removing scalefactors.
    args$sf <- NULL
    ## Setting trace to false.
    args$trace <- FALSE
    ## Don't calculate the Hessian for bootstrap fits.
    args$hess <- FALSE
    ## Setting start value for g0 away from 1.
    if ("g0" %in% names(args$sv)){
        args$sv[["g0"]] <- min(c(0.95, args$sv[["g0"]]))
    }
    cue.rates <- args$cue.rates
    coefs <- fit$coefficients
    par.names <- names(coefs)
    n.pars <- length(coefs)
    ## Seeds are drawn up front so serial and parallel runs simulate
    ## the same resamples.
    seeds <- sample(1:1e8, size = N)
    seed.mce <- sample(1:1e8, size = 1)
    ## Function carrying out a single bootstrap iteration: simulates a
    ## capture history, refits the model, and returns the refitted
    ## coefficients together with the maximum gradient component.
    FUN <- function(i, fit, args, cue.rates, infotypes, seeds, prog){
        set.seed(seeds[i])
        if (fit$n.sessions > 1){
            ## Simulating capture history.
            args$capt <- lapply(sim.capt(fit), function(x) x[c("bincapt", infotypes)])
            n.dets <- sum(sapply(args$capt, function(x) nrow(x$bincapt)))
        } else {
            ## Simulating capture history.
            args$capt <- sim.capt(fit)[c("bincapt", infotypes)]
            n.dets <- nrow(args$capt$bincapt)
        }
        ## If no calls simulated, set density to 0 and other parameters to NA.
        if (n.dets == 0){
            n.par <- length(fit$coefficients)
            out <- rep(NA, n.par + 1)
            out[names(fit$coefficients) %in% c("D", "Da")] <- 0
            out[names(fit$coefficients) == "D_link"] <- -Inf
        } else {
            ## Simulating calling frequencies (if required).
            if (fit$fit.freqs){
                if (length(cue.rates) > 1){
                    args$cue.rates <- sample(cue.rates, replace = TRUE)
                }
            }
            ## Fitting model.
            fit.boot <- suppressWarnings(try(do.call("fit.ascr", args), silent = TRUE))
            ## If unconverged, refit model with default start values.
            if ("try-error" %in% class(fit.boot) || fit.boot$maxgrad < -0.01){
                args$sv <- NULL
                fit.boot <- suppressWarnings(try(do.call("fit.ascr", args), silent = TRUE))
            }
            ## If still unconverged, give up and report NA.
            if ("try-error" %in% class(fit.boot) || fit.boot$maxgrad < -0.01){
                n.par <- length(fit$coefficients)
                out <- rep(NA, n.par + 1)
            } else {
                out <- c(fit.boot$coefficients, fit.boot$maxgrad)
            }
        }
        if (prog){
            cat(i, "\n", file = "prog.txt", append = TRUE)
        }
        out
    }
    if (n.cores == 1){
        ## Main bootstrap.
        res <- matrix(0, nrow = N, ncol = n.pars + 1)
        colnames(res) <- c(par.names, "maxgrad")
        ## Setting up progress bar.
        if (prog){
            pb <- txtProgressBar(min = 0, max = N, style = 3)
        }
        for (i in seq_len(N)){
            res[i, ] <- FUN(i, fit = fit, args = args, cue.rates = cue.rates,
                            infotypes = fit$infotypes, seeds = seeds, prog = FALSE)
            ## Updating progress bar.
            if (prog){
                setTxtProgressBar(pb, i)
            }
        }
        ## Closing progress bar.
        if (prog){
            close(pb)
        }
        ## Additional bootstraps.
        extra.res <- vector(mode = "list", length = length(infotypes))
        names(extra.res) <- names(infotypes)
        for (i in seq(from = 1, by = 1, along.with = infotypes)){
            new.args <- args
            new.args$capt <- args$capt[c("bincapt", infotypes[[i]])]
            new.fit <- suppressWarnings(do.call("fit.ascr", new.args))
            new.n.pars <- length(new.fit$coefficients)
            new.par.names <- names(new.fit$coefficients)
            extra.res[[i]] <- matrix(0, nrow = N, ncol = new.n.pars + 1)
            colnames(extra.res[[i]]) <- c(new.par.names, "maxgrad")
            ## Setting up another progress bar.
            if (prog){
                pb <- txtProgressBar(min = 0, max = N, style = 3)
            }
            for (j in seq_len(N)){
                extra.res[[i]][j, ] <- suppressWarnings(FUN(j, fit = fit, args = args,
                                                            cue.rates = cue.rates,
                                                            infotypes = infotypes[[i]],
                                                            seeds = seeds, prog = FALSE))
                ## Updating progress bar.
                if (prog){
                    setTxtProgressBar(pb, j)
                }
            }
            ## Closing progress bar.
            if (prog){
                close(pb)
            }
            ## Removing maximum gradient component. Previously this was
            ## only done in the parallel branch, so boot$extra.boots had
            ## an extra "maxgrad" column when n.cores == 1.
            extra.res[[i]] <- extra.res[[i]][, -ncol(extra.res[[i]]), drop = FALSE]
        }
    } else {
        if (n.cores > detectCores()){
            stop("The argument n.cores is greater than the number of available cores.")
        }
        cluster <- makeCluster(n.cores)
        clusterEvalQ(cluster, {
            library(ascr)
        })
        ## Main bootstrap.
        if (prog){
            file.create("prog.txt")
        }
        ## IF THERE'S AN ERROR HERE YOU NEED TO REBUILD THE PACKAGE
        ## DUE TO library() CALL ABOVE.
        res <- t(parSapplyLB(cluster, 1:N, FUN, fit = fit, args = args,
                             cue.rates = cue.rates, infotypes = fit$infotypes,
                             seeds = seeds, prog = prog))
        if (prog){
            unlink("prog.txt")
        }
        ## Additional bootstrap.
        extra.res <- vector(mode = "list", length = length(infotypes))
        names(extra.res) <- names(infotypes)
        for (i in seq(from = 1, by = 1, along.with = infotypes)){
            if (prog){
                file.create("prog.txt")
            }
            extra.res[[i]] <- t(parSapplyLB(cluster, 1:N, FUN, fit = fit,
                                            args = args, cue.rates = cue.rates,
                                            infotypes = infotypes[[i]],
                                            seeds = seeds, prog = prog))
            ## Removing maximum gradient component. drop = FALSE keeps a
            ## matrix even for single-parameter models, matching the
            ## treatment of the main results below.
            extra.res[[i]] <- extra.res[[i]][, -ncol(extra.res[[i]]), drop = FALSE]
            if (prog){
                unlink("prog.txt")
            }
        }
        stopCluster(cluster)
    }
    ## Calculating bootstrapped standard errors, correlations and
    ## covariances.
    maxgrads <- res[, ncol(res)]
    ## Removing maximum gradient component.
    res <- res[, -ncol(res), drop = FALSE]
    se <- apply(res, 2, sd, na.rm = TRUE)
    names(se) <- par.names
    cor <- diag(n.pars)
    dimnames(cor) <- list(par.names, par.names)
    vcov <- diag(se^2)
    dimnames(vcov) <- list(par.names, par.names)
    ## Off-diagonal elements only exist with at least two parameters;
    ## the unguarded loop 1:(n.pars - 1) would fail for n.pars == 1.
    if (n.pars > 1){
        for (i in 1:(n.pars - 1)){
            for (j in (i + 1):n.pars){
                cor[i, j] <- cor[j, i] <- cor(res[, i], res[, j], use = "na.or.complete")
                vcov[i, j] <- vcov[j, i] <- cor[i, j]*se[i]*se[j]
            }
        }
    }
    bias <- apply(res, 2, mean, na.rm = TRUE) - coefs
    ## Bootstrap to calculate MCE for bias and standard errors.
    bias.mce <- se.mce <- numeric(n.pars)
    names(bias.mce) <- names(se.mce) <- par.names
    if (M > 0){
        set.seed(seed.mce)
        converged <- which(!is.na(res[, 1]))
        n.converged <- length(converged)
        mce.boot <- matrix(sample(converged, size = n.converged*M,
                                  replace = TRUE), nrow = M,
                           ncol = n.converged)
        for (i in par.names){
            par.boot <- matrix(res[mce.boot, i], nrow = M, ncol = n.converged)
            bias.mce[i] <- sd(apply(par.boot, 1, mean) - coefs[i] - bias[i])
            se.mce[i] <- sd(apply(par.boot, 1, sd))
        }
    } else {
        bias.mce <- NA
        se.mce <- NA
    }
    out <- fit
    boot <- list(boots = res, se = se, se.mce = se.mce, cor = cor, vcov = vcov,
                 bias = bias, bias.mce = bias.mce, maxgrads = maxgrads,
                 extra.boots = extra.res)
    out$boot <- boot
    class(out) <- c("ascr.boot", class(fit))
    out
}
## Aliasing old boot.admbsecr() function name; retained so that code
## written before the package was renamed from admbsecr keeps working.
#' @rdname boot.ascr
#' @export
boot.admbsecr <- boot.ascr
#' Combining subsamples to obtain a standard error.
#'
#' Calculates a single standard error for a parameter that has been
#' calculated by averaging over subsamples: within each bootstrap
#' iteration the bootstrapped values of the parameter are averaged
#' across the supplied model objects, and the standard deviation of
#' these means is returned.
#'
#' @param ... A number of bootstrap model objects.
#' @param par A character string providing the parameter for which to
#'     calculate a standard error.
#' @param plot Logical, if \code{TRUE}, a boxplot is produced.
#' @param ceiling A threshold value; bootstrapped parameter values
#'     above this are discarded.
subsample.se <- function(..., par, plot = TRUE, ceiling = NULL){
    fits <- list(...)
    ## Bootstrapped values of `par` from a single fitted object.
    extract.par <- function(fit, par){
        fit$boot$boots[, par]
    }
    ## Rows of the transposed matrix are bootstrap iterations, so
    ## averaging each row combines the parameter across subsamples.
    sub.means <- apply(t(laply(fits, extract.par, par = par)), 1, mean)
    ## Optionally discard implausibly large bootstrapped values.
    if (!is.null(ceiling)){
        sub.means <- sub.means[sub.means <= ceiling]
    }
    if (plot){
        boxplot(sub.means)
    }
    sd(sub.means, na.rm = TRUE)
}
|
c710ab2cab842f17ba72f0fe5bcf72e225f6b989
|
a880badcba73ed7338eed0c910c42e58bb534703
|
/plot1.R
|
8b16c2691de5258141fb1e8ec1d8ab83f2068c2b
|
[] |
no_license
|
jesrui/ExData_Plotting1
|
8024af878a8a126f10dea0f959f71a4346634586
|
db2bfb3696a48ce5d27de193b740c0f6b690c72b
|
refs/heads/master
| 2020-11-30T12:32:06.672988
| 2015-09-13T18:42:32
| 2015-09-13T18:47:35
| 42,173,925
| 0
| 0
| null | 2015-09-09T11:14:47
| 2015-09-09T11:14:47
| null |
UTF-8
|
R
| false
| false
| 270
|
r
|
plot1.R
|
## plot1.R -- first exploratory figure for the household power data.
##
## Loads the helper defined in readData.R, reads the household power
## consumption file, and writes a red histogram of global active power
## directly to plot1.png.
source('readData.R')

filename <- '../household_power_consumption.txt'
data <- get.power.consumption(filename)

# Plot 1: histogram of global active power on a PNG device.
png(filename = "plot1.png")
with(data, hist(Global_active_power,
                col = 'red',
                xlab = 'Global Active Power (kilowatts)',
                main = 'Global Active Power'))
dev.off()
|
c65cc0aa4d6649272d5d5c164da0ab4f75d70275
|
234cb2b1ad3f11d7c5a6a33e14f16d4aa5e64d32
|
/man/Normalise.rd
|
4a372df86838851c4460c257caaacf2462149c36
|
[] |
no_license
|
abhorrentPantheon/metabolomics
|
3306a2819f43f143fcf05eefd66b04a192ee66c8
|
22e566c54dc9309176fcf8f74f0ded264a8cb90b
|
refs/heads/master
| 2021-01-16T19:33:27.192251
| 2015-03-19T04:15:44
| 2015-03-19T04:15:44
| 35,789,054
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,589
|
rd
|
Normalise.rd
|
\name{Normalise}
\alias{Normalise}
\title{Normalisation}
\description{
Normalise a metabolomic data matrix according to a specified method.
}
\usage{
Normalise(inputdata,
method = c("median","mean","sum","ref", "is", "nomis","ccmn", "ruv2"),
refvec = NULL, ncomp = NULL, k = NULL, nc = NULL,
saveoutput = FALSE, outputname = NULL)
}
\arguments{
\item{inputdata}{A log transformed data frame in the input data format. See \code{\link{metabolomics}} for details.}
\item{method}{A character string indicating the required normalization method. Must be one of "\code{median}", "\code{mean}", "\code{sum}", "\code{ref}", "\code{is}", "\code{nomis}", "\code{ccmn}" or "\code{ruv2}". See Details for information.}
\item{refvec}{A vector of internal standards to be used with the method "\code{is}", or a reference vector to be used with the method "\code{ref}".}
\item{ncomp}{Number of PCA components to be used for the "\code{ccmn}" method. If \code{NULL}, this will be determined by cross validation as described by Redestig (2012).}
\item{k}{Number of factors of unwanted variation to be included in the
"\code{ruv2}" model.}
\item{nc}{A vector indicating which metabolites should be used as the
non-changing metabolites in the "\code{ruv2}" model, or as multiple internal
standards in the "\code{ccmn}", "\code{nomis}" and "\code{ruv2}" methods.}
\item{saveoutput}{A logical indicating whether the normalised data matrix should be saved as a .csv file.}
\item{outputname}{The name of the output file if the output has to be saved.}
}
\details{
The normalisation methods based on scaling include normalisation to a total sum, or by the median or mean of each sample, and are denoted by "\code{sum}", "\code{median}", and "\code{mean}" respectively. The method "\code{ref}" normalises the metabolite abundances to a specified reference vector.
The normalisation methods based on internal or external standards include "\code{is}" which uses a single standard, Cross-contribution Compensating Multiple internal standard Normalisation, "\code{ccmn}" (Redestig \emph{et al}., 2009); normalization using optimal selection of multiple internal standards, "\code{nomis}" (Sysi-aho \emph{et al}. 2007); and "\code{ruv2}" (De Livera \emph{et al}. 2012a).
The Remove Unwanted Variation "\code{ruv2}" method generates a matrix of unwanted variation using non-changing metabolites including any internal or external standards. This matrix of unwanted variation can then be used for identifying differentially abundant metabolites in the \code{\link{LinearModelFit}} function. The RUV2 method attempts to capture both observed and unobserved technical and biological variation (De Livera \emph{et al}. 2012a, Gagnon-Bartsch \emph{et al}. 2012).
An overview of these normalisation methods are given by De Livera \emph{et al}. (2012a, 2012b). Both the "\code{ruv2}" and "\code{ccmn}" methods use the factors of interest (groups), and therefore should not be used for those unsupervised methods where the groups must be treated as unknown.
}
\value{The result is an object of class \code{\link[metabolomics:metabdata]{metabdata}}. }
\seealso{\code{\link[crmn]{normFit}}.}
\author{Alysha M De Livera, Jairus B Bowne}
\references{
De Livera, A. M., Dias, D. A., De Souza, D., Rupasinghe, T., Pyke, J.,
Tull, D., Roessner, U., McConville, M., Speed, T. P. (2012a) Normalising
and integrating metabolomics data. \emph{Analytical Chemistry} 84(24): 10768-10776.
De Livera, A.M., Olshansky, M., Speed, T. P. (2013) Statistical analysis
of metabolomics data. \emph{Methods in Molecular Biology} In press.
Gagnon-Bartsch, Johann A., Speed, T. P. (2012) Using control genes to
correct for unwanted variation in microarray data. \emph{Biostatistics} 13(3):
539-552.
Redestig, H., Fukushima, A., Stenlund, H., Moritz, T., Arita, M.,
Saito, K., Kusano, M. (2009) Compensation for systematic
cross-contribution improves normalization of mass spectrometry based
metabolomics data. \emph{Analytical Chemistry} 81(19): 7974-7980.
Sysi-Aho, M., Katajamaa, M., Yetukuri, L., Oresic, M. (2007) Normalization
method for metabolomics data using optimal selection of multiple internal
standards. \emph{BMC Bioinformatics} 8(1): 93.
}
\examples{
## Reading the data
data(mix)
Y <- log(exprs(mix))
inputdata <- data.frame(pData(mix)$type, t(Y))
batch <- pData(mix)$runorder
nc <- which(with(fData(mix), tag == "IS")==TRUE)
## Normalise by the median
norm_med <- Normalise(inputdata, method = "median")
## Normalise by an internal standard
norm_is <- Normalise(inputdata, method = "is",
refvec=inputdata[, nc[1]])
## Normalise by a reference vector, in this case an internal standard
norm_ref <- Normalise(inputdata, method = "ref",
refvec = inputdata[, nc[1]])
## Normalise by the sum
norm_sum <- Normalise(inputdata, method = "sum")
## Normalise by the NOMIS method
norm_nomis <- Normalise(inputdata, method = "nomis", nc = nc)
## Normalise by the CCMN method
norm_ccmn <- Normalise(inputdata, method = "ccmn", nc = nc, ncomp = 2)
## Normalise using RUV2 method
norm_ruv2 <- Normalise(inputdata, method = "ruv2", nc = nc, k = 9)
## Pca Plots of unwanted variation
PcaPlots(data.frame(batch, norm_ruv2$output[, -1]),
main = "Unwanted batch variation")
}
|
7f347648c77c08c4c930c6f9b5091c929da5a669
|
aeb9c1b695b40727c7cb70019ba67a3cbbd03cf2
|
/docs/rhelp/wicksell.Rd
|
54a4f9f1d044d9f37724d60f3bf2085e0bdf89b9
|
[] |
no_license
|
gamlj/gamlj.github.io
|
ed589b9ac86903902ab55ebbbacffe03afe9b5e4
|
d774d46b64040cee63a834d939538a816b6db0b9
|
refs/heads/master
| 2023-06-14T14:27:12.453343
| 2023-06-12T17:33:25
| 2023-06-12T17:33:25
| 183,435,098
| 2
| 3
| null | 2023-06-12T16:12:02
| 2019-04-25T12:59:14
|
CSS
|
UTF-8
|
R
| false
| true
| 448
|
rd
|
wicksell.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{wicksell}
\alias{wicksell}
\title{Depression over time}
\usage{
data(wicksell)
}
\description{
Data for a repeated-measures ANOVA analysed with mixed models
}
\examples{
data(wicksell)
}
\references{
David C. Howell, Overview of Mixed Models \url{https://www.uvm.edu/~statdhtx/StatPages/Mixed-Models-Repeated/Mixed-Models-Overview.html}
}
\keyword{datasets}
|
e3d6d20997fc8283418adf7345e647c7cd45b4c4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sirt/examples/linking.haberman.Rd.R
|
226d39ab635ad7a7ecc5db7ac1a239f55f53adab
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,261
|
r
|
linking.haberman.Rd.R
|
## Extracted example code for sirt::linking.haberman (auto-generated
## from the package's Rd examples; runs Example 1 only -- the longer
## examples below are kept inside a "Not run" comment block).
library(sirt)
### Name: linking.haberman
### Title: Linking in the 2PL/Generalized Partial Credit Model
### Aliases: linking.haberman summary.linking.haberman
### Keywords: Linking Equating
### ** Examples
#############################################################################
# EXAMPLE 1: Item parameters data.pars1.rasch and data.pars1.2pl
#############################################################################
# Model 1: Linking three studies calibrated by the Rasch model
data(data.pars1.rasch)
mod1 <- sirt::linking.haberman( itempars=data.pars1.rasch )
summary(mod1)
# Model 1b: Linking these studies, weighting the studies by
# proportion weights 3 : 0.5 : 1 (see below).
# All weights are the same for each item but they could also
# be item specific.
itempars <- data.pars1.rasch
itempars$wgt <- 1
itempars[ itempars$study=="study1","wgt"] <- 3
itempars[ itempars$study=="study2","wgt"] <- .5
mod1b <- sirt::linking.haberman( itempars=itempars )
summary(mod1b)
# Model 2: Linking three studies calibrated by the 2PL model
data(data.pars1.2pl)
mod2 <- sirt::linking.haberman( itempars=data.pars1.2pl )
summary(mod2)
# additive model instead of logarithmic model for item slopes
mod2b <- sirt::linking.haberman( itempars=data.pars1.2pl, a_log=FALSE )
summary(mod2b)
## Not run:
##D #############################################################################
##D # EXAMPLE 2: Linking longitudinal data
##D #############################################################################
##D data(data.long)
##D
##D #******
##D # Model 1: Scaling with the 1PL model
##D
##D # scaling at T1
##D dat1 <- data.long[, grep("T1", colnames(data.long) ) ]
##D resT1 <- sirt::rasch.mml2( dat1 )
##D itempartable1 <- data.frame( "study"="T1", resT1$item[, c("item", "a", "b" ) ] )
##D # scaling at T2
##D dat2 <- data.long[, grep("T2", colnames(data.long) ) ]
##D resT2 <- sirt::rasch.mml2( dat2 )
##D summary(resT2)
##D itempartable2 <- data.frame( "study"="T2", resT2$item[, c("item", "a", "b" ) ] )
##D itempartable <- rbind( itempartable1, itempartable2 )
##D itempartable[,2] <- substring( itempartable[,2], 1, 2 )
##D # estimate linking parameters
##D mod1 <- sirt::linking.haberman( itempars=itempartable )
##D
##D #******
##D # Model 2: Scaling with the 2PL model
##D
##D # scaling at T1
##D dat1 <- data.long[, grep("T1", colnames(data.long) ) ]
##D resT1 <- sirt::rasch.mml2( dat1, est.a=1:6)
##D itempartable1 <- data.frame( "study"="T1", resT1$item[, c("item", "a", "b" ) ] )
##D
##D # scaling at T2
##D dat2 <- data.long[, grep("T2", colnames(data.long) ) ]
##D resT2 <- sirt::rasch.mml2( dat2, est.a=1:6)
##D summary(resT2)
##D itempartable2 <- data.frame( "study"="T2", resT2$item[, c("item", "a", "b" ) ] )
##D itempartable <- rbind( itempartable1, itempartable2 )
##D itempartable[,2] <- substring( itempartable[,2], 1, 2 )
##D # estimate linking parameters
##D mod2 <- sirt::linking.haberman( itempars=itempartable )
##D
##D #############################################################################
##D # EXAMPLE 3: 2 Studies - 1PL and 2PL linking
##D #############################################################################
##D set.seed(789)
##D I <- 20 # number of items
##D N <- 2000 # number of persons
##D # define item parameters
##D b <- seq( -1.5, 1.5, length=I )
##D # simulate data
##D dat1 <- sirt::sim.raschtype( stats::rnorm( N, mean=0,sd=1 ), b=b )
##D dat2 <- sirt::sim.raschtype( stats::rnorm( N, mean=0.5,sd=1.50 ), b=b )
##D
##D #*** Model 1: 1PL
##D # 1PL Study 1
##D mod1 <- sirt::rasch.mml2( dat1, est.a=rep(1,I) )
##D summary(mod1)
##D # 1PL Study 2
##D mod2 <- sirt::rasch.mml2( dat2, est.a=rep(1,I) )
##D summary(mod2)
##D
##D # collect item parameters
##D dfr1 <- data.frame( "study1", mod1$item$item, mod1$item$a, mod1$item$b )
##D dfr2 <- data.frame( "study2", mod2$item$item, mod2$item$a, mod2$item$b )
##D colnames(dfr2) <- colnames(dfr1) <- c("study", "item", "a", "b" )
##D itempars <- rbind( dfr1, dfr2 )
##D
##D # Haberman linking
##D linkhab1 <- sirt::linking.haberman(itempars=itempars)
##D ## Transformation parameters (Haberman linking)
##D ## study At Bt
##D ## 1 study1 1.000 0.000
##D ## 2 study2 1.465 -0.512
##D ##
##D ## Linear transformation for item parameters a and b
##D ## study A_a A_b B_b
##D ## 1 study1 1.000 1.000 0.000
##D ## 2 study2 0.682 1.465 -0.512
##D ##
##D ## Linear transformation for person parameters theta
##D ## study A_theta B_theta
##D ## 1 study1 1.000 0.000
##D ## 2 study2 1.465 0.512
##D ##
##D ## R-Squared Measures of Invariance
##D ## slopes intercepts
##D ## R2 1 0.9979
##D ## sqrtU2 0 0.0456
##D
##D #*** Model 2: 2PL
##D # 2PL Study 1
##D mod1 <- sirt::rasch.mml2( dat1, est.a=1:I )
##D summary(mod1)
##D # 2PL Study 2
##D mod2 <- sirt::rasch.mml2( dat2, est.a=1:I )
##D summary(mod2)
##D
##D # collect item parameters
##D dfr1 <- data.frame( "study1", mod1$item$item, mod1$item$a, mod1$item$b )
##D dfr2 <- data.frame( "study2", mod2$item$item, mod2$item$a, mod2$item$b )
##D colnames(dfr2) <- colnames(dfr1) <- c("study", "item", "a", "b" )
##D itempars <- rbind( dfr1, dfr2 )
##D
##D # Haberman linking
##D linkhab2 <- sirt::linking.haberman(itempars=itempars)
##D ## Transformation parameters (Haberman linking)
##D ## study At Bt
##D ## 1 study1 1.000 0.000
##D ## 2 study2 1.468 -0.515
##D ##
##D ## Linear transformation for item parameters a and b
##D ## study A_a A_b B_b
##D ## 1 study1 1.000 1.000 0.000
##D ## 2 study2 0.681 1.468 -0.515
##D ##
##D ## Linear transformation for person parameters theta
##D ## study A_theta B_theta
##D ## 1 study1 1.000 0.000
##D ## 2 study2 1.468 0.515
##D ##
##D ## R-Squared Measures of Invariance
##D ## slopes intercepts
##D ## R2 0.9984 0.9980
##D ## sqrtU2 0.0397 0.0443
##D
##D #############################################################################
##D # EXAMPLE 4: 3 Studies - 1PL and 2PL linking
##D #############################################################################
##D set.seed(789)
##D I <- 20 # number of items
##D N <- 1500 # number of persons
##D # define item parameters
##D b <- seq( -1.5, 1.5, length=I )
##D # simulate data
##D dat1 <- sirt::sim.raschtype( stats::rnorm( N, mean=0,sd=1 ), b=b )
##D dat2 <- sirt::sim.raschtype( stats::rnorm( N, mean=0.5,sd=1.50 ), b=b )
##D dat3 <- sirt::sim.raschtype( stats::rnorm( N, mean=-.2,sd=.8 ), b=b )
##D # set some items to non-administered
##D dat3 <- dat3[, -c(1,4) ]
##D dat2 <- dat2[, -c(1,2,3) ]
##D
##D #*** Model 1: 1PL in sirt
##D # 1PL Study 1
##D mod1 <- sirt::rasch.mml2( dat1, est.a=rep(1,ncol(dat1)) )
##D summary(mod1)
##D # 1PL Study 2
##D mod2 <- sirt::rasch.mml2( dat2, est.a=rep(1,ncol(dat2)) )
##D summary(mod2)
##D # 1PL Study 3
##D mod3 <- sirt::rasch.mml2( dat3, est.a=rep(1,ncol(dat3)) )
##D summary(mod3)
##D
##D # collect item parameters
##D dfr1 <- data.frame( "study1", mod1$item$item, mod1$item$a, mod1$item$b )
##D dfr2 <- data.frame( "study2", mod2$item$item, mod2$item$a, mod2$item$b )
##D dfr3 <- data.frame( "study3", mod3$item$item, mod3$item$a, mod3$item$b )
##D colnames(dfr3) <- colnames(dfr2) <- colnames(dfr1) <- c("study", "item", "a", "b" )
##D itempars <- rbind( dfr1, dfr2, dfr3 )
##D
##D # use person parameters
##D personpars <- list( mod1$person[, c("EAP","SE.EAP") ], mod2$person[, c("EAP","SE.EAP") ],
##D mod3$person[, c("EAP","SE.EAP") ] )
##D
##D # Haberman linking
##D linkhab1 <- sirt::linking.haberman(itempars=itempars, personpars=personpars)
##D # compare item parameters
##D round( cbind( linkhab1$joint.itempars[,-1], linkhab1$b.trans )[1:5,], 3 )
##D ## aj bj study1 study2 study3
##D ## I0001 0.998 -1.427 -1.427 NA NA
##D ## I0002 0.998 -1.290 -1.324 NA -1.256
##D ## I0003 0.998 -1.140 -1.068 NA -1.212
##D ## I0004 0.998 -0.986 -1.003 -0.969 NA
##D ## I0005 0.998 -0.869 -0.809 -0.872 -0.926
##D
##D # summary of person parameters of second study
##D round( psych::describe( linkhab1$personpars[[2]] ), 2 )
##D ## var n mean sd median trimmed mad min max range skew kurtosis
##D ## EAP 1 1500 0.45 1.36 0.41 0.47 1.52 -2.61 3.25 5.86 -0.08 -0.62
##D ## SE.EAP 2 1500 0.57 0.09 0.53 0.56 0.04 0.49 0.84 0.35 1.47 1.56
##D ## se
##D ## EAP 0.04
##D ## SE.EAP 0.00
##D
##D #*** Model 2: 2PL in TAM
##D library(TAM)
##D # 2PL Study 1
##D mod1 <- TAM::tam.mml.2pl( resp=dat1, irtmodel="2PL" )
##D pvmod1 <- TAM::tam.pv(mod1, ntheta=300, normal.approx=TRUE) # draw plausible values
##D summary(mod1)
##D # 2PL Study 2
##D mod2 <- TAM::tam.mml.2pl( resp=dat2, irtmodel="2PL" )
##D pvmod2 <- TAM::tam.pv(mod2, ntheta=300, normal.approx=TRUE)
##D summary(mod2)
##D # 2PL Study 3
##D mod3 <- TAM::tam.mml.2pl( resp=dat3, irtmodel="2PL" )
##D pvmod3 <- TAM::tam.pv(mod3, ntheta=300, normal.approx=TRUE)
##D summary(mod3)
##D
##D # collect item parameters
##D #!! Note that in TAM the parametrization is a*theta - b while linking.haberman
##D #!! needs the parametrization a*(theta-b)
##D dfr1 <- data.frame( "study1", mod1$item$item, mod1$B[,2,1], mod1$xsi$xsi / mod1$B[,2,1] )
##D dfr2 <- data.frame( "study2", mod2$item$item, mod2$B[,2,1], mod2$xsi$xsi / mod2$B[,2,1] )
##D dfr3 <- data.frame( "study3", mod3$item$item, mod3$B[,2,1], mod3$xsi$xsi / mod3$B[,2,1] )
##D colnames(dfr3) <- colnames(dfr2) <- colnames(dfr1) <- c("study", "item", "a", "b" )
##D itempars <- rbind( dfr1, dfr2, dfr3 )
##D
##D # define list containing person parameters
##D personpars <- list( pvmod1$pv[,-1], pvmod2$pv[,-1], pvmod3$pv[,-1] )
##D
##D # Haberman linking
##D linkhab2 <- sirt::linking.haberman(itempars=itempars,personpars=personpars)
##D ## Linear transformation for person parameters theta
##D ## study A_theta B_theta
##D ## 1 study1 1.000 0.000
##D ## 2 study2 1.485 0.465
##D ## 3 study3 0.786 -0.192
##D
##D # extract transformed person parameters
##D personpars.trans <- linkhab2$personpars
##D
##D #############################################################################
##D # EXAMPLE 5: Linking with simulated item parameters containing outliers
##D #############################################################################
##D
##D # simulate some parameters
##D I <- 38
##D set.seed(18785)
##D b <- stats::rnorm( I, mean=.3, sd=1.4 )
##D # simulate DIF effects plus some outliers
##D bdif <- stats::rnorm(I,mean=.4,sd=.09)+( stats::runif(I)>.9 )* rep( 1*c(-1,1)+.4, each=I/2 )
##D # create item parameter table
##D itempars <- data.frame( "study"=paste0("study",rep(1:2, each=I)),
##D "item"=paste0( "I", 100 + rep(1:I,2) ), "a"=1,
##D "b"=c( b, b + bdif ) )
##D
##D #*** Model 1: Haberman linking with least squares regression
##D mod1 <- sirt::linking.haberman( itempars=itempars )
##D summary(mod1)
##D
##D #*** Model 2: Haberman linking with robust bisquare regression
##D mod2 <- sirt::linking.haberman( itempars=itempars2, b_trim=.4, maxiter=20)
##D summary(mod2)
## End(Not run)
|
59f45cd7b15d15246b3080fc4c24c7aebf2cbdfb
|
fcaaf7ba8ec7e21883394ad57f3fb544f4dd63dc
|
/Cap04/06-BarPlots.R
|
c8c1a8020ebdd1b758ee6bc95778b07bab9d0703
|
[] |
no_license
|
GasparPSousa/BigDataAnalytics-R-Azure
|
f3226150461496c0d78781bfd8fe3b5bb5237199
|
aeeb060f32f8846ea80f6bc4631d0f07d21cbf1e
|
refs/heads/main
| 2023-05-14T23:57:15.302363
| 2021-06-06T14:04:48
| 2021-06-06T14:04:48
| 357,303,863
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,843
|
r
|
06-BarPlots.R
|
# Bar Plots
# Setting the working directory.
# Put the working directory you are using on your own machine between the quotes.
# Avoid directories with spaces in the name.
setwd("~/Cursos/DSA/FCD/BigDataRAzure/Cap04")
# Show the current working directory
getwd()
# List the packages currently attached
search()
?barplot
# Preparing the data - number of weddings in a church in Sao Paulo
# (idiom fix: use TRUE/FALSE rather than the reassignable T/F shortcuts)
dados <- matrix(c(652, 1537, 598, 242, 36, 46, 38, 21, 218, 327, 106, 67), nrow = 3, byrow = TRUE)
dados
# Naming the rows and columns of the matrix
colnames(dados) <- c("0", "1-150", "151-300", "> 300")
rownames(dados) <- c("Jovem", "Adulto", "Idoso")
dados
# Building the bar plot (grouped bars, one bar per age group within each range)
barplot(dados, beside = TRUE)
# Building the plot - stacked bar plot
barplot(dados)
# The 3 age groups are stacked in the same column for each count range
barplot(dados, col = c("steelblue1", "tan3", "seagreen3"))
# Add a legend to the previous chart
legend("topright", pch = 19, col = c("steelblue1", "tan3", "seagreen3"), legend = c("Jovem","Adulto","Idoso"))
?pch
?legend
# Now we have one column per age group, grouped within each count range
barplot(dados, beside = TRUE, col = c("steelblue1", "tan3", "seagreen3"),
        main = "Número de Casamentos em São Paulo")
legend("topright", pch = 19, col = c("steelblue1", "tan3", "seagreen3"),
       legend = c("Jovem","Adulto","Idoso"))
# Transposing the matrix swaps the roles of age group and count range
barplot(t(dados), beside = TRUE, col = c("steelblue1", "tan3", "seagreen3", "peachpuff1"),
        main = "Número de Casamentos em São Paulo")
legend("topright", pch = 19, col = c("steelblue1", "tan3", "seagreen3", "peachpuff1"),
       legend = c("0","1-150","151-300",">300"))
# Saving BarPlot1 as a png file
png("Grafico6_BarPlot1.png", width = 900, height = 900, res = 72)
barplot(dados, col = c("steelblue1", "tan3", "seagreen3"))
legend("topright", pch = 19, col = c("steelblue1", "tan3", "seagreen3"), legend = c("Jovem","Adulto","Idoso"))
dev.off()
# Saving BarPlot2 as a png file
png("Grafico6_BarPlot2.png", width = 900, height = 900, res = 72)
barplot(dados, beside = TRUE, col = c("steelblue1", "tan3", "seagreen3"),
        main = "Número de Casamentos em São Paulo")
legend("topright", pch = 19, col = c("steelblue1", "tan3", "seagreen3"),
       legend = c("Jovem","Adulto","Idoso"))
dev.off()
# Saving BarPlot3 as a png file
png("Grafico6_BarPlot3.png", width = 900, height = 900, res = 72)
barplot(t(dados), beside = TRUE, col = c("steelblue1", "tan3", "seagreen3", "peachpuff1"),
        main = "Número de Casamentos em São Paulo")
legend("topright", pch = 19, col = c("steelblue1", "tan3", "seagreen3", "peachpuff1"),
       legend = c("0","1-150","151-300",">300"))
dev.off()
# Quit R
q()
|
40e7ab443f96cbc090517cb3994a8354d1e55511
|
985dd57a2845aad61f61c6d4e24bdb91a99333e1
|
/tests/testthat/test-kmers.R
|
2b74cad318f52bc075b46ed3157c7e8c14f4bb2a
|
[] |
no_license
|
kriemo/kentr
|
f8e84bb33ea2097d6e272b0b0aa41a72cd5bedb6
|
c4122d7b10de03f273db33727a777c8bc49926bd
|
refs/heads/master
| 2021-07-20T07:47:29.757326
| 2021-05-03T17:35:32
| 2021-05-03T17:35:32
| 88,792,047
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,820
|
r
|
test-kmers.R
|
# testthat suite for the kentr k-mer utilities (get_sequences, get_kmers,
# revComp). The expectations below pin exact counts produced by the
# compiled package code.
context('kmers')

# Shared fixture: the test fasta bundled with the package, queried with two
# identical intervals on opposite strands.
fa_file <- system.file("extdata", "test.fasta", package = "kentr")

df <- data.frame(
  chrom = c("chr1",
            "chr1"),
  start = c(24000,
            24000),
  end = c(24100,
          24100),
  strand = c("+",
             "-"))

res <- get_sequences(df, fa_file)

test_that('basic usage works', {
  kmers <- get_kmers(res$seq, 3)
  # get_kmers returns one entry per input sequence, as a list.
  expect_true(is.list(kmers))
  # all length 3
  expect_true(all(sapply(kmers[[1]]$kmer, nchar) == 3))
  # all uppercase
  expect_true(all( toupper(kmers[[1]]$kmer) == kmers[[1]]$kmer))
})

test_that('homopolymers are correct', {
  # A homopolymer of length L contains L - k + 1 overlapping k-mers.
  test_seq <- c("AAAAAA")
  kmers <- get_kmers(test_seq, 1)
  expect_true(kmers[[1]]$kmer == "A")
  expect_true(kmers[[1]]$counts == 6)
  test_seq <- c("AAAAAA")
  kmers <- get_kmers(test_seq, 2)
  expect_true(kmers[[1]]$kmer == "AA")
  expect_true(kmers[[1]]$counts == 5)
  test_seq <- c("AAAAAA")
  kmers <- get_kmers(test_seq, 6)
  expect_true(kmers[[1]]$kmer == "AAAAAA")
  expect_true(kmers[[1]]$counts == 1)
})

test_that('NA reported for seq too small', {
  # k larger than the sequence length cannot yield any k-mer.
  test_seq <- c("A")
  kmers <- get_kmers(test_seq, 10)
  expect_true(all(is.na(kmers[[1]])))
})

test_that('both_strands arg works', {
  # Counting both strands of the reverse complement should reproduce the
  # forward-strand counts of the original sequence.
  test_seq <- c("AATTAA")
  kmers <- get_kmers(test_seq, 6)
  rc_test_seq <- revComp(test_seq)
  rc_kmers <- get_kmers(rc_test_seq, 6, both_strands = TRUE)
  expect_equal(kmers, rc_kmers)
  # With both_strands, complementary k-mers collapse into a canonical form,
  # so GG/TT should no longer appear as distinct entries.
  test_seq <- c("GGAACCTT")
  kmers <- get_kmers(test_seq, 2)
  expect_true("GG" %in% kmers[[1]]$kmer)
  expect_true("TT" %in% kmers[[1]]$kmer)
  kmers <- get_kmers(test_seq, 2, both_strands = TRUE)
  expect_false("GG" %in% kmers[[1]]$kmer)
  expect_false("TT" %in% kmers[[1]]$kmer)
  kmer_df <- kmers[[1]]
  expect_true(kmer_df[kmer_df$kmer == "AA", "counts"] == 2)
  expect_true(kmer_df[kmer_df$kmer == "CC", "counts"] == 2)
})
|
77bd7d17e1a4d062869cb7da0aa5c8c1fd67c68e
|
1839b1bc21a43384e9c169f0bf5fd0a3e4c68b0a
|
/w18/R/mergeMotifs.R
|
cc28267ea33a2b2c128166fbdd21263866352d1b
|
[] |
no_license
|
CarlosMoraMartinez/worm19
|
b592fa703896e1bbb6b83e41289674c63a046313
|
99fb3ef35d13739ee83f08b2ac1107179ea05ee2
|
refs/heads/master
| 2020-07-18T23:25:13.542031
| 2019-07-03T14:53:04
| 2019-07-03T14:53:04
| 206,333,433
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 991
|
r
|
mergeMotifs.R
|
#' mergeMotifs
#'
#' Merges all the motif data from several windows into one row, avoiding
#' redundancies.
#'
#' @param win window dataframe with the following fields:
#' "width", "signature", "offsets", "strands", "seqs", "starts", "ends",
#' where each field holds "_"-separated motif values per window.
#' @return A named character vector of length 7 (names "widths", "signature",
#' "offsets", "strands", "seqs", "starts", "ends"); each element contains all
#' the de-duplicated motif values pasted into a single string, using "_" as
#' separator.
#' @keywords merge, motifs
#' @import data.table
#' @export
mergeMotifs <- function(win){
  # Keep only the motif columns to be merged.
  win2 <- win[, c("width", "signature", "offsets", "strands", "seqs",
                  "starts", "ends")]
  # For each window (row), split every "_"-joined field back into its
  # individual motif values and collect them as a data frame (one row per
  # motif). Note that apply() passes each row as a character vector, so all
  # fields are handled as text from here on.
  # NOTE(review): this assumes every field of a row splits into the same
  # number of pieces, otherwise as.data.frame() would recycle — confirm
  # upstream guarantees this.
  winlist <- apply(win2, 1, FUN = function(x){
    as.data.frame(sapply(x, strsplit, split="_"))
  })
  # Stack all per-window motif tables and drop duplicated motifs.
  windf <- rbindlist(winlist) # requires data.table package
  windf <- windf[!duplicated(windf), ]
  # Re-join each column into one "_"-separated string per field.
  row <- sapply(windf, paste, sep="_", collapse="_")
  # Rename "width" to "widths" in the merged output.
  names(row) <- c("widths", "signature", "offsets",
                  "strands", "seqs", "starts", "ends")
  return(row)
}
|
59a5e53f36020647f102d134a5581b0f3f79890d
|
a893f063e4fb685c6d959882b74f35cfc17686ee
|
/solutions/reading_excel_or_off_internet.R
|
1458153ca5d1c6d23d29f845448797defbb0c9fd
|
[] |
no_license
|
jmarshallnz/intro_to_r
|
e8ebe29fe4df3d32f9848e79eb75ead3a46bce4c
|
35065a9255915f5f9eec4248972560fcbe7ff991
|
refs/heads/main
| 2023-06-16T10:09:57.485226
| 2021-06-21T04:13:12
| 2021-06-27T23:29:42
| 332,910,494
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 533
|
r
|
reading_excel_or_off_internet.R
|
# Reading from Excel, or reading off the internet
library(tidyverse)
library(readxl)

# Try reading an Excel sheet
# (idiom fix: use <- for assignment, not =)
covid19 <- read_excel("data/covid19/covid-cases-30july20.xlsx")
covid19

# You'll notice there is some junk at the top.
# We need to skip the first 3 lines in the sheet before reading
covid19 <- read_excel("data/covid19/covid-cases-30july20.xlsx",
                      skip = 3)
covid19
# That looks better!

# Try reading off the internet:
donkeys <- read_csv("https://www.massey.ac.nz/~jcmarsha/227215/data/donkey.csv")
|
de711cf1f87a223c3c5e4f39d71aa0ca1276d105
|
57b9a09f56013a8867a5505b03945a37aa2f6275
|
/run_analysis.R
|
52c44023f40cdb6c379151f10df2413af205dc84
|
[] |
no_license
|
AOverlack/CourseraWork
|
0219b818bda1f6618c48fdb15b2693979f561476
|
6463dd1e8aa3faa9caeae0813c56a834d0ef60b4
|
refs/heads/master
| 2021-01-13T00:42:02.982321
| 2015-07-26T21:14:58
| 2015-07-26T21:14:58
| 36,096,370
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,195
|
r
|
run_analysis.R
|
## PREPARATORY MANUAL ACTIVITY
## Download the files from the file address and unzip them manually.
## Create a folder named "data" in the working directory.
## Store the unzipped files "X_train.txt", "X_test.txt", "subject_train.txt",
## "subject_test.txt", "features.txt", "y_test.txt", "y_train.txt" and
## "activity_labels.txt" in the folder "data".

## Read fixed-width text files into data frames, using read.table
DFTest <- read.table("./data/X_test.txt")
DFTrain <- read.table("./data/X_train.txt")
## Create "Subject" identifier data frames for both test and training sets
Subjecttest <- read.table("./data/subject_test.txt")
Subjecttrain <- read.table("./data/subject_train.txt")
## Add a column name to both "Subject" data frames using the "dplyr" package
library(dplyr)
Subjecttest <- rename(Subjecttest,Subject=V1)
Subjecttrain <- rename(Subjecttrain,Subject=V1)
## Create "Activity" identifier data frames for test and training sets
Activitytest <- read.table("./data/y_test.txt")
Activitytrain <- read.table("./data/y_train.txt")
## Add a column name to both "Activity" data frames using the "dplyr" package
## (bug fix: this line was previously missing its "##" comment marker, which
## made the script fail to parse)
Activitytest <- rename(Activitytest,Activity=V1)
Activitytrain <- rename(Activitytrain,Activity=V1)
## Create a Headers data frame from the "features.txt" dataset
Headers <- read.table("./data/features.txt")
## NB: Headers contains duplicate entries, so those and the corresponding
## columns in DFTest and DFTrain must be removed. First, attach the column
## names to the DFTest and DFTrain data frames.
names(DFTest) <- Headers$V2
names(DFTrain) <- Headers$V2
## Add the Activity and Subject columns to DFTest and DFTrain, then bind both
DFTestWide <- cbind(Activitytest,Subjecttest,DFTest)
DFTrainWide <- cbind(Activitytrain,Subjecttrain,DFTrain)
DFTotal <- rbind(DFTestWide,DFTrainWide)
## Remove duplicate column entries (keeps the first occurrence of each name)
DFTotalClean <- DFTotal[,unique(colnames(DFTotal))]
## Replace the numeric codes in the "Activity" column by the activity names
## from "activity_labels.txt"
Activity <- read.table("./data/activity_labels.txt")
DFTotalClean$Activity <- Activity$V2[match(DFTotalClean$Activity,Activity$V1)]
## Select only the relevant (mean and std) variables using the "dplyr" package
## and sort by Activity and Subject
DFinal <- select(DFTotalClean,contains("Activity"),contains("Subject"),contains("-mean()-"),contains("-std()-"))
DFinalSort <- DFinal [with(DFinal,order(Activity,Subject)),]
## Export to a text file called "TidyData-1.txt"
## (bug fix: the file name previously lacked the ".txt" extension that the
## comment and codebook refer to)
write.table(DFinalSort,file="TidyData-1.txt",row.names=FALSE)
## Create a tidy dataset containing only the averages of each variable for
## each Activity and each Subject. Transform DFinalSort into a data.table.
library(data.table)
DT <- data.table(DFinalSort)
## Variable names for which the mean must be calculated
## (excluding the first 2: Activity and Subject)
variables <- tail(names(DT),-2)
## Build a table of mean values per variable, by Activity and by Subject
DTMean <- DT[,lapply(.SD,mean),.SDcols=variables, by=list(Activity,Subject)]
## Write the tidy dataset of mean values
write.table(DTMean,file="TidyData-Means.txt",row.names=FALSE)
|
7e3d940640217ed3fc625c932940daf3f762487e
|
8884141f6e515990fede8c1509d4e855d821ac73
|
/for_liam.R
|
f03562c4795691bfc07ec0f516dc0b5e258a2ffe
|
[] |
no_license
|
samlipworth/heaps
|
6a0ebe14989c838ae735816946f9e913674ec6c7
|
6802a0ba5921637c589558b3f69e800bd0080ffa
|
refs/heads/main
| 2023-08-11T06:04:49.358950
| 2021-09-30T08:54:42
| 2021-09-30T08:54:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,179
|
r
|
for_liam.R
|
library(tidyverse)
library(data.table)
library(micropan)

# NOTE(review): year() below comes from lubridate, which library(tidyverse)
# only attaches from tidyverse >= 2.0 — add library(lubridate) if this fails
# with "could not find function \"year\"".

guuids <- read_tsv('./data/guuids')
cg <- filter(guuids, grepl('chromo', guuid)) # cg = chromosomal guuids
pg <- filter(guuids, !grepl('chromo', guuid)) # pg = plasmid guuids
# Strip everything after the first underscore so guuids match kleborate strains.
guuids$guuid <- str_replace_all(guuids$guuid, '_.*', '')

spec <- read_tsv('./data/kleborate.tsv') %>% select(strain, species)
guuids <- left_join(guuids, spec, by = c("guuid" = "strain"))
# Keep only Klebsiella and Escherichia isolates.
guuids <- filter(guuids, grepl('Kleb', species) | grepl('Esch', species))

# Panaroo gene presence/absence matrix (renamed from `c`, which shadowed
# base::c()).
panmat <- fread('~/data/gene_presence_absence.Rtab')
pan_cols <- names(panmat)
chromo <- pan_cols[grepl('chromo', pan_cols)]
chromo_pangenome <- dplyr::select(panmat, chromo)
# nb these are concatenated, i.e. all plasmids in one fasta file
plasmid_pangenome <- fread('~/data/gene_presence_absence_concat_plas.Rtab')

dates <- read.delim('~/data/guuids_dates', sep = ' ', header = FALSE)
names(dates) <- c('acc', 'cd', 't', 'guuid')
dates$year <- year(dates$cd)
d2009 <- filter(dates, year == 2009) %>% filter(guuid %in% guuids$guuid)
d2018 <- filter(dates, year == 2018) %>% filter(guuid %in% guuids$guuid)

# Normalise chromosome-matrix column names to bare guuids.
x <- names(chromo_pangenome)
x <- str_replace_all(x, '_.*', '')
names(chromo_pangenome) <- x

# Per-year chromosome pangenomes, dropping genes absent from every isolate.
cpg2009 <- dplyr::select(chromo_pangenome, d2009$guuid)
cpg2009 <- cpg2009[rowSums(cpg2009) > 0]
cpg2018 <- dplyr::select(chromo_pangenome, d2018$guuid)
cpg2018 <- cpg2018[rowSums(cpg2018) > 0]

# Fit Heaps' law `reps` times on a gene presence/absence matrix and return the
# second element of each micropan::heaps() fit, stacked with rbind so the
# result keeps the same one-column matrix layout as the original copy-pasted
# loops. Replaces four duplicated loop blocks.
run_heaps <- function(pangenome, reps = 100, n_perm = 100) {
  out <- NULL
  for (i in seq_len(reps)) {
    print(i)
    fit <- micropan::heaps(t(pangenome), n.perm = n_perm)
    out <- rbind(out, fit[2])
  }
  out
}

out_cpg_2018 <- run_heaps(cpg2018)
out_cpg_2009 <- run_heaps(cpg2009)

# not all isolates have plasmids
d2009 <- filter(d2009, guuid %in% names(plasmid_pangenome))
d2018 <- filter(d2018, guuid %in% names(plasmid_pangenome))

# Per-year plasmid pangenomes, again dropping all-zero genes.
ppg2009 <- dplyr::select(plasmid_pangenome, d2009$guuid)
ppg2009 <- ppg2009[rowSums(ppg2009) > 0]
ppg2018 <- dplyr::select(plasmid_pangenome, d2018$guuid)
ppg2018 <- ppg2018[rowSums(ppg2018) > 0]

out_ppg_2018 <- run_heaps(ppg2018)
out_ppg_2009 <- run_heaps(ppg2009)
|
d4faf3035ea14965a059bd8bc9739ea72b1a4619
|
2741826483417f28990d21a414821f0d741b811c
|
/inst/tinytest/test_character2integer.R
|
e120ca396a735fc20b5c8244f0038a15f164687e
|
[] |
no_license
|
HughParsonage/hutilscpp
|
3262201a11d2026d37284f709e4e0e6fbb53d3a9
|
143f9e2dca2c0f25e6e64388fdfd8f6db64477d9
|
refs/heads/master
| 2022-10-20T22:12:58.646043
| 2022-10-07T07:20:58
| 2022-10-07T07:20:58
| 155,201,062
| 8
| 3
| null | 2022-09-29T15:58:30
| 2018-10-29T11:33:08
|
R
|
UTF-8
|
R
| false
| false
| 4,072
|
r
|
test_character2integer.R
|
# tinytest checks for hutilscpp::character2integer() and hutilscpp::Comma():
# Comma() formats numbers with group separators; character2integer() parses
# such strings back to integers (or doubles when allow.double permits).
library(hutilscpp)

# Round-trip integers through prettyNum, with and without a big mark.
x <- c(1626783884L, 969909421L, 205541854L, -1L, 0L, 1L, -1214788235L,
       -709260613L, -795055625L)
cx <- prettyNum(x, big.mark = ",")
expect_equal(character2integer(cx), x)
cx <- prettyNum(x)
expect_equal(character2integer(cx), x)

# Same values as doubles.
x <- as.double(x)
cx <- prettyNum(x, big.mark = ",")
expect_equal(character2integer(cx), x)
cx <- prettyNum(x)
expect_equal(character2integer(cx), x)

# A spread of magnitudes up to the integer range, plus negatives and small
# values around zero.
x <- c(1L, 3L, 7L, 17L, 43L, 105L, 254L, 616L, 1493L, 3616L, 8761L,
       21225L, 51417L, 124557L, 301736L, 730948L, 1770704L, 4289486L,
       10391170L, 25172341L, 60979343L, 147720878L, 357849998L, 866882343L,
       2100000000L)
x <- c(x, -5:5, -x)
cx <- as.character(x)
expect_equal(character2integer(cx), x)
# Non-comma big marks must also round-trip.
cx <- Comma(x, big.mark = "_")
expect_equal(character2integer(cx), x)
cx <- Comma(x, big.mark = "'")
expect_equal(character2integer(cx), x)

# Values beyond integer range: NA, double, or error depending on allow.double.
expect_equal(character2integer("12345678901", allow.double = NA), NA_integer_)
expect_equal(character2integer("12345678901", allow.double = TRUE), 12345678901)
expect_error(character2integer("12345678901", allow.double = FALSE))

# Decimal formatting and parsing, including leading whitespace and NA strings.
expect_equal(Comma(5700000.05, digits = 2), "5,700,000.05")
expect_equal(character2integer(" -5,000.5", allow.double = TRUE), -5000.5)
expect_equal(character2integer(" -7,000", na.strings = NA_character_), -7000)
expect_equal(character2integer(c(" -7,000", NA), na.strings = NA_character_), c(-7000L, NA))
# Trailing ".00..." should still yield integer results.
expect_true(is.integer(character2integer("1234.00")))
expect_true(is.integer(character2integer("2,012,345,345.00000")))
expect_true(identical(character2integer(c(NA, "1 234 567 890"), na.strings = "NA"),
                      c(NA, 1234567890L)))
# Special values NA / Inf format as their plain names.
expect_equal(Comma(c(NA, 50, 1234.44, -14.1, Inf, -Inf), digits = 2L),
             c("NA", "50.00", "1,234.44", "-14.10", "Inf", "-Inf"))
expect_equal(character2integer(c(NA, "5,300")), c(NA, 5300L))
expect_equal(Comma(c(0, 0.5, 1234.56), digits = 2L), c("0.00", "0.50", "1,234.56"))

# Input validation error messages.
expect_error(character2integer(55), "must be type char")
expect_error(character2integer("5300", na.strings = 0), "must be character")
expect_error(Comma(5300.2, digits = .Machine$integer.max), "unlikely high value")

# na.strings: matching entries become NA; multiple NA strings supported.
expect_equal(character2integer(c("-99", "5300"), na.strings = "-99"), c(NA, 5300L))
expect_equal(character2integer(c("-99", "-8", "-99", "-9", "5300"),
                               na.strings = "-99"),
             c(NA, -8L, NA, -9L, 5300L))
expect_equal(character2integer(c("-99", "-8", "-99", "-9", "5300",
                                 "3,000,000,000"),
                               na.strings = "-99",
                               allow.double = TRUE),
             c(NA, -8L, NA, -9L, 5300L, 3e9))
expect_equal(character2integer(c("-99", "-8", "-99", "-9", "5300",
                                 "3,000,000,000"),
                               na.strings = c("-99", "-9"),
                               allow.double = TRUE),
             c(NA, -8L, NA, NA, 5300L, 3e9))

# test small numbers (ensuring they are formatted with the requested digits)
expect_equal(Comma(c(0.00000, 55), digits = 1L),
             c("0.0", "55.0"))
expect_equal(Comma(c(0.00000, 55, 5.1), digits = 1L),
             c("0.0", "55.0", "5.1"))
expect_equal(Comma(c(0.000001, 55), digits = 1L), c("0.0", "55.0"))
expect_equal(Comma(c(0.000001, 55, 5.1), digits = 1L), c("0.0", "55.0", "5.1"))
expect_equal(Comma(c(1.000001, 55), digits = 1L), c("1.0", "55.0"))
expect_equal(Comma(-0.1, digits = 0L), "-0")
expect_equal(Comma(0.1, digits = 0L), "0")

# Alternative big marks, including space, double-quote and tilde.
expect_equal(Comma(c(5123L), big.mark = " "), "5 123")
expect_equal(Comma(c(5123L), big.mark = '"'), '5"123')
expect_equal(Comma(c(5123L), big.mark = '~'), '5~123')
expect_equal(Comma(c(5, 4, 5.5), big.mark = ",", digits = 1L),
             c("5.0", "4.0", "5.5"))
expect_equal(character2integer(c("5.0", "4.0", "5.5"), allow.double = TRUE,
                               na.strings = "na"),
             c(5, 4, 5.5))
expect_error(character2integer(c("5", "55", "55.005"), allow.double = FALSE),
             "FALSE")
expect_true(is.double(character2integer(c("5", "55", "55.005"), allow.double = TRUE)))
|
f47bd9f3e0db199b596f94e97c33281c9a2efff0
|
eb9128ca974c2407b6760d6feee036724f7fe22f
|
/activity2/activity2_script.r
|
e138e3bf112c9297e670fa5e7a3fdaaf199cb3bf
|
[] |
no_license
|
kevinlzw/GEOG331
|
da5c7780da535d5a5a4792e6170b89bfe8867d20
|
e31d6b566cc5c7b83a0a084bd96d5c4f189502ae
|
refs/heads/master
| 2020-12-20T10:20:17.463422
| 2020-04-24T20:38:27
| 2020-04-24T20:38:27
| 236,040,429
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,933
|
r
|
activity2_script.r
|
# Make a vector of tree heights in meters
heights <- c(30,41,20,22)
# Convert to cm
heights_cm <- heights*100
heights_cm
# First item in the vector
heights[1]
# Look at the 2nd and 3rd tree heights
heights[2:3]
help(matrix)
# Set up a matrix with 2 columns, filled in by row
# (idiom fix: use TRUE/FALSE rather than the reassignable T/F shortcuts)
Mat <- matrix(c(1,2,3,4,5,6), ncol=2, byrow = TRUE)
Mat
# Set up a matrix with 2 columns, filled in by column
Mat.bycol <- matrix(c(1,2,3,4,5,6), ncol=2, byrow = FALSE)
Mat.bycol
# Index row 1, column 2
Mat.bycol[1,2]
# Look at all values in column 2
Mat.bycol[,2]
# Read in the weather station file from the data folder
datW <- read.csv("y:\\Students\\klian\\a02\\2011124.csv")
# Get more information about the dataframe
str(datW)
# Specify a column with a proper date format
datW$dateF <- as.Date(datW$DATE, "%Y-%m-%d")
# Create a date column by reformatting the date to only include years,
# treated as numeric data
datW$year <- as.numeric(format(datW$dateF,"%Y"))
#Q2
# An example of a character vector
character_vector <- c("ab", "food", "water", "tired", "AB")
class(character_vector)
# An example of a numeric vector
numeric_vector <- c(1, -0.2, 999, 0.55, -50)
class(numeric_vector)
# An example of an integer vector
integer_vector <- c(1L, -2L, 999L, 55L, -50L)
class(integer_vector)
# An example of a factor
factor_example <- factor(c("banana", "banana", "apple", "apple"))
class(factor_example)
# Find all unique site names
# NOTE(review): levels() and the as.numeric(NAME) conversion below assume
# NAME was read as a factor; from R 4.0 read.csv defaults to
# stringsAsFactors = FALSE, so NAME may need an explicit as.factor() first.
levels(datW$NAME)
# Look at the mean maximum temperature for Aberdeen
mean(datW$TMAX[datW$NAME == "ABERDEEN, WA US"])
# Look at the mean maximum temperature for Aberdeen,
# with na.rm set to TRUE to ignore NA values
# (idiom fix: TRUE rather than the reassignable shortcut T)
mean(datW$TMAX[datW$NAME == "ABERDEEN, WA US"], na.rm=TRUE)
# Calculate the average daily temperature:
# halfway between the minimum and maximum temperature
datW$TAVE <- datW$TMIN + ((datW$TMAX-datW$TMIN)/2)
# Get the mean across all sites.
# `by` is a list of one or more variables to index over; FUN is the function
# to apply. Function-specific arguments (here na.rm) follow after a comma.
averageTemp <- aggregate(datW$TAVE, by=list(datW$NAME), FUN="mean",na.rm=TRUE)
averageTemp
# Change the automatic output column names to something meaningful.
# MAAT is a common abbreviation for Mean Annual Air Temperature.
colnames(averageTemp) <- c("NAME","MAAT")
averageTemp
# Convert the factor levels to numbers (site index) for later subsetting
datW$siteN <- as.numeric(datW$NAME)
#Q4
# Add four histograms to the same window; first initialize the layout.
par(mar=c(1,1,1,1))
par(mfrow=c(2,2))
# Draw the average-daily-temperature histogram for one site, titled with the
# site's factor-level name, and add a solid red line at the mean plus dashed
# red lines at mean +/- 1 standard deviation. This helper replaces four
# copy-pasted blocks that differed only in the site index and fill colour.
site_hist <- function(site, fill) {
  hist(datW$TAVE[datW$siteN == site],
       freq=FALSE,
       main = paste(levels(datW$NAME)[site]),
       xlab = "Average daily temperature (degrees C)",
       ylab="Relative frequency",
       col=fill,
       border="white")
  # Compute the mean and sd once, ignoring missing observations.
  m <- mean(datW$TAVE[datW$siteN == site], na.rm=TRUE)
  s <- sd(datW$TAVE[datW$siteN == site], na.rm=TRUE)
  # Mean line: red (tomato3), thickness 3.
  abline(v = m, col = "tomato3", lwd = 3)
  # Dashed lines one standard deviation below and above the mean.
  abline(v = m - s, col = "tomato3", lty = 3, lwd = 3)
  abline(v = m + s, col = "tomato3", lty = 3, lwd = 3)
}
# One panel per site, same colours as the original script.
site_hist(1, "grey50")       # first site in the levels
site_hist(2, "blue")         # Livermore, CA US
site_hist(3, "cyan")         # Mandan Experiment Station, ND US
site_hist(4, "dodgerblue")   # Mormon Flat, AZ US
# Reset margins and layout back to a single plot panel.
par(mar=c(5.1,4.1,4.1,2.1))
par(mfrow=c(1,1))
# Make a histogram for the first site in our levels.
# main= is the title argument; paste the actual factor name rather than the
# numeric index since that is more meaningful.
# The histogram object is named so it can be referenced later.
h1 <- hist(datW$TAVE[datW$siteN == 1],
           freq=FALSE,
           main = paste(levels(datW$NAME)[1]),
           xlab = "Average daily temperature (degrees C)",
           ylab="Relative frequency",
           col="grey50",
           border="white")
# seq() generates the x values at which to evaluate the normal curve across
# the range of temperature values.
x.plot <- seq(-10,30, length.out = 100)
# dnorm() gives the normal probability density for a given mean and sd.
y.plot <- dnorm(seq(-10,30, length.out = 100),
                mean(datW$TAVE[datW$siteN == 1],na.rm=TRUE),
                sd(datW$TAVE[datW$siteN == 1],na.rm=TRUE))
# Scale the density curve to fit the plot, since it has a different range
# from the data density. This trick keeps the maximum of both datasets at the
# same plot height; both share zero as a minimum.
y.scaled <- (max(h1$density)/max(y.plot)) * y.plot
# points() adds points or lines to an existing plot; the first two arguments
# are the x and y coordinates.
points(x.plot,
       y.scaled,
       type = "l",
       col = "royalblue3",
       lwd = 4,
       lty = 2)
# pnorm(q, mean, sd) gives the probability of values at or below q
# (the area under the curve to the left of q).
pnorm(0,
      mean(datW$TAVE[datW$siteN == 1],na.rm=TRUE),
      sd(datW$TAVE[datW$siteN == 1],na.rm=TRUE))
# pnorm with 5 gives all probability (area of the curve) below 5
pnorm(5,
      mean(datW$TAVE[datW$siteN == 1],na.rm=TRUE),
      sd(datW$TAVE[datW$siteN == 1],na.rm=TRUE))
# Probability of values between 0 and 5: area below 5 minus area below 0.
pnorm(5,
      mean(datW$TAVE[datW$siteN == 1],na.rm=TRUE),
      sd(datW$TAVE[datW$siteN == 1],na.rm=TRUE))- pnorm(0,
      mean(datW$TAVE[datW$siteN == 1],na.rm=TRUE),
      sd(datW$TAVE[datW$siteN == 1],na.rm=TRUE))
# pnorm of 20 gives all probability (area of the curve) below 20;
# subtracting from one leaves the area above 20.
1 - pnorm(20,
          mean(datW$TAVE[datW$siteN == 1],na.rm=TRUE),
          sd(datW$TAVE[datW$siteN == 1],na.rm=TRUE))
# qnorm is the inverse: it returns the temperature at the 95th percentile
# (the original comment here wrongly repeated the pnorm description).
qnorm(0.95,
      mean(datW$TAVE[datW$siteN == 1],na.rm=TRUE),
      sd(datW$TAVE[datW$siteN == 1],na.rm=TRUE))
#Q6
# pnorm of 18.51026 gives all probability below 18.51026 under a climate
# whose mean is shifted up by 4 degrees; subtracting from one leaves the
# area above 18.51026 (probability of exceeding the old 95th percentile).
1 - pnorm(18.51026, mean(datW$TAVE[datW$siteN == 1],na.rm=TRUE) + 4,
          sd(datW$TAVE[datW$siteN == 1],na.rm=TRUE))
#Q7
# Make a histogram of daily precipitation for Aberdeen.
hist(datW$PRCP[datW$siteN == 1],
     freq=FALSE,
     main = paste(levels(datW$NAME)[1]),
     xlab = "Average daily precipitation",
     ylab="Relative frequency",
     col="grey50",
     border="white")
#Q8
# Total precipitation per site and per year.
SumPRCP <- aggregate(datW$PRCP, by=list(datW$NAME, datW$year), FUN="sum",na.rm=TRUE)
colnames(SumPRCP) <- c("NAME","year", "PRCP")
SumPRCP
# Histogram of annual precipitation at ABERDEEN, WA US.
hist(SumPRCP$PRCP[SumPRCP$NAME == "ABERDEEN, WA US"],
     freq=FALSE,
     main = paste(levels(datW$NAME)[1]),
     xlab = "Annual precipitation",
     ylab="Relative frequency",
     col="grey50",
     border="white")
#Q9
# Mean precipitation for all sites.
# NOTE(review): despite the name, this aggregates the *daily* PRCP values,
# so it is the mean daily precipitation per site; for mean *annual*
# precipitation, aggregate SumPRCP$PRCP by NAME instead.
mean_annual <- aggregate(datW$PRCP, by=list(datW$NAME), FUN="mean",na.rm=TRUE)
colnames(mean_annual) <- c("NAME","PRCP")
mean_annual
averageTemp
|
149541be57d02622a58a058f5eb6277767e04110
|
f0e646a57a90c4b7b7fe57d9f5601bae24f908ab
|
/data-raw/vic.R
|
4ed8c4de50031eeac4034fd5bf35abff26d21d31
|
[] |
no_license
|
adam-gruer/ozbabynames
|
d7e60b4256b1c3621aec09199801cafdfe625957
|
d1aff4cac93c58197743717e0a0cd7ff6a83af1d
|
refs/heads/master
| 2020-04-07T20:40:17.319725
| 2018-11-22T12:56:03
| 2018-11-22T12:56:03
| 158,698,482
| 1
| 0
| null | 2018-11-22T12:48:44
| 2018-11-22T12:48:44
| null |
UTF-8
|
R
| false
| false
| 549
|
r
|
vic.R
|
library(purrr)
library(tidyverse)
library(readxl)
# Read each annual Victorian baby-names spreadsheet and stack them into one
# long table. In every sheet, columns 1-3 hold the male counts and columns
# 5-7 hold the female counts (with separate headers); the 4-digit year is
# encoded at the end of the file name.
read_vic_sheet <- function(path) {
  stem <- tools::file_path_sans_ext(path)
  sheet <- read_excel(path, skip = 2)
  boys <- sheet[1:3]
  boys$sex <- "Male"
  # Female columns get the same names as the male ones so the rows line up.
  girls <- set_names(sheet[5:7], names(sheet)[1:3])
  girls$sex <- "Female"
  rbind(boys, girls) %>%
    mutate(year = substr(stem, nchar(stem) - 3, nchar(stem)))
}
vic <- map_dfr(fs::dir_ls("data-raw/vic"), read_vic_sheet)
# Tidy up: lower-case names, canonical column order, integer types.
vic <- vic %>%
  rename_all(tolower) %>%
  select(name, sex, year, count) %>%
  mutate(year = as.integer(year),
         count = as.integer(count))
|
eb1fc0f0ee9a8afb256f1881376918fc0d733ed7
|
10d35ea866d69d940b6104c8bde2aef0cc83f2bd
|
/inst/examples/calendar.R
|
f6aed36640084d3de581aad87785d8c9cc6f2a56
|
[] |
no_license
|
omegahat/RwxWidgets
|
95b0a01bde4654bf5caed772bb3b8492f14f047b
|
aff0f3fb9b928ebbcda4258b1d37eb5d799a1ac2
|
refs/heads/master
| 2021-01-10T05:54:17.063492
| 2012-02-22T01:54:38
| 2012-02-22T01:54:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 261
|
r
|
calendar.R
|
# Minimal RwxWidgets demo: a frame holding a single wxCalendarCtrl,
# laid out with a horizontal box sizer.
library(RwxWidgets)
wxInit()
f = RFrame( size = c(50, 50))
cal = wxCalendarCtrl(f)
# Attach the sizer and let the calendar fill the available space.
f$SetSizer(sz <- wxBoxSizer(wxHORIZONTAL))
sz$Add(cal, 1, wxEXPAND)
cal$SetSize(50, 50)
# NOTE(review): SetSizer was already called above — this second call looks
# redundant; confirm it is intentional.
f$SetSizer(sz)
sz$SetSizeHints(f)
f$Show()
print(f$GetChildren())
#wxEventLoop()$Run()
|
87a1c36e23cb66f54d5f1c9c89e6abdc9f08e5ca
|
2271a5faab43855132dd5bea92031b5433932bbc
|
/R/gl.costdistances.r
|
5beb4423687d4a7572ed6e25f9811974d18fdd30
|
[] |
no_license
|
Konoutan/dartR
|
26252e126e5f38589e21f726e3777360390a8005
|
aa35a02121aff4fb092b3f88e5200d343938656b
|
refs/heads/master
| 2022-06-30T13:41:35.734109
| 2019-12-05T08:58:16
| 2019-12-05T08:58:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,937
|
r
|
gl.costdistances.r
|
#'Calculates cost distances for a given landscape (resistance matrix)
#'
#'@param landscape a raster object coding the resistance of the landscape
#'@param locs coordinates of the subpopulations. If a genlight object is provided coordinates are taken from @other$latlong and centers for population (pop(gl)) are calculated. In case you want to calculate costdistances between individuals redefine pop(gl) via: \code{pop(gl)<- indNames(gl)}.
#'@param method defines the type of cost distance; one of "leastcost", "rSPDistance" or "commute" (Circuitscape type)
#'@param NN number of next neighbours used to build the transition matrix; recommendation is 8
#'@return a costdistance matrix between all pairs of locs
#'@description calculates a cost distance matrix, to be used with run.popgensim
#'@importFrom gdistance costDistance rSPDistance commuteDistance
#' @export
gl.costdistances <- function(landscape, locs, method, NN)
{
  # Validate method up front: previously an unrecognised value only failed
  # much later with the obscure error "object 'cd.mat' not found".
  if (!method %in% c("leastcost", "rSPDistance", "commute"))
    stop("method must be one of 'leastcost', 'rSPDistance' or 'commute'.\n")
  if (is(locs, "genlight"))
  {
    if (is.null(locs@other$latlong)) stop("no locations were provided in the genlight object [@other$latlong].\n")
    if (is.null(pop(locs)))
    {
      cat("No population definition provided, hence I will calculate costdistances between individuals\n")
      pop(locs) <- indNames(locs)
      if (is.null(pop(locs))) pop(locs) <- 1:nInd(locs)
    }
    # Collapse individual coordinates to population centroids
    # (mean lat/long per population).
    locs <- apply(locs@other$latlong, 2, function(x) tapply(x, pop(locs), mean))
  } else locs <- as.matrix(locs)
  # Transition (conductance) matrix: conductance is the reciprocal of the
  # resistance coded in the landscape raster.
  fric.mat <- transition(landscape, function(x) 1/x[2], NN)
  # set distances to meters if not projected already
  fric.mat@crs@projargs <- "+proj=merc +units=m"
  fric.mat.cor <- geoCorrection(fric.mat)
  if (method == "leastcost") cd.mat <- costDistance(fric.mat.cor, locs, locs)
  if (method == "rSPDistance") cd.mat <- rSPDistance(fric.mat.cor, locs, locs, theta = 1)
  if (method == "commute") cd.mat <- as.matrix(commuteDistance(fric.mat.cor, locs))
  colnames(cd.mat) <- row.names(locs)
  rownames(cd.mat) <- row.names(locs)
  return(cd.mat)
}
|
af6fef787ca434201a0c7594ca447547fa82e27a
|
c5c30dd82371c65f9ecc7170bd9460859472b008
|
/deterministic_ngm_calc.R
|
e7fe149f7a0216bcd30e9644e1eff6c6c4c584ae
|
[] |
no_license
|
renatamuy/core_matrix_publish
|
2e05af873aa202b06a1096d224ad8be8d436fbe7
|
0a6edab32faf67e7c43e261f5a694ff4ca1ce84f
|
refs/heads/master
| 2021-06-26T20:58:05.004053
| 2017-09-14T10:17:39
| 2017-09-14T10:17:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,166
|
r
|
deterministic_ngm_calc.R
|
############################################################
# Code for NGM R0
## FD & DD transmission
############################################################
# R0 at different phi + psi
# just by calculating a pathogen that comes into a completely naive population
# at carrying capacity for that proportion forested
############################################################
# NOTE(review): clears the entire workspace when sourced — fine for a
# stand-alone script, but avoid if this file is ever sourced elsewhere.
rm(list = ls())
library(matrixcalc)
# Supplies matrix.F.DD(), matrix.F.FD(), matrix.V() and plotR0()
source('deterministic_ngm_cm_func.R')
# Range of phi
phi = seq(0.01, 1, by = 0.001) #vector for different forested proportions
epsilon = rep.int(1,length(phi)) # no change in edge effects
#epsilon = (1+cos(phi*(pi*3/2)-2.3))
btw= seq(0,1, by=0.05) # aka psi
############################################################
## DENSITY DEPENDENT R0
# Parameters defined (core; matrix) for DD transmission
paramsD <- list(d = c(0.1, 0.02),
k = c(100, 100),
beta.wDD = c(0.00136, 0.0004),
gamma = c(0.03, 0.05),
alpha = c(0.001, 0.01),
sigma = c(0.05, 0.05))
R0C = 0.5
R0M = 1.5
# Back-calculate the transmission rates implied by the target R0 values
# (printed for a sanity check against beta.wDD above).
(paramsD[['alpha']][1] + paramsD[['gamma']][1] + paramsD[['d']][1])*R0C/paramsD[['k']][1]
(paramsD[['alpha']][2] + paramsD[['gamma']][2] + paramsD[['d']][2])*R0M/paramsD[['k']][2]
# Single-host R0 under density-dependent transmission: beta*K/(alpha+gamma+d)
R0_c = (paramsD[['beta.wDD']][1]*paramsD[['k']][1])/(paramsD[['alpha']][1] + paramsD[['gamma']][1] + paramsD[['d']][1])
R0_m = (paramsD[['beta.wDD']][2]*paramsD[['k']][2])/(paramsD[['alpha']][2] + paramsD[['gamma']][2] + paramsD[['d']][2])
print(c(R0_c,R0_m))
R0.c.dd = numeric(length(phi)) #assuming no matrix species present (or not transmissible)
R0.m.dd = numeric(length(phi)) #assuming no patch species present (or not transmissible)
R0.combin.dd = data.frame(matrix(NA, nrow = length(btw), ncol = length(phi)))
# Two-host R0 for every (psi, phi) combination: the dominant eigenvalue of
# the next-generation matrix G = V^-1 F (eigen() returns eigenvalues in
# decreasing magnitude, so values[[1]] is the spectral radius).
for (j in 1:length(btw)){
for (i in 1:length(phi)){
mat_F <- matrix.F.DD(paramsD[["beta.wDD"]], paramsD[["beta.wDD"]]*btw[j], epsilon[i], paramsD[['k']], phi[i])
mat_V <- matrix.V(paramsD[["alpha"]], paramsD[["gamma"]], paramsD[["d"]])
mat_G <- matrix.inverse(mat_V) %*% mat_F
eigen.ngm.dd <- eigen(mat_G)
R0.combin.dd[j,i] = eigen.ngm.dd$values[[1]]
R0.c.dd[i] = (paramsD[["beta.wDD"]][1]*paramsD[["k"]][1]*(1.01-phi[i]))/
(paramsD[["alpha"]][1] + paramsD[["gamma"]][1] + paramsD[["d"]][1])
R0.m.dd[i] = (paramsD[["beta.wDD"]][2]*paramsD[["k"]][2]*phi[i])/
(paramsD[["alpha"]][2] + paramsD[["gamma"]][2] + paramsD[["d"]][2])
}
}
plotR0(phi, R0.c.dd, R0.m.dd, R0.combin.dd)
#export for plotting
dim(R0.combin.dd)
export = rbind(R0.combin.dd,R0.c.dd,R0.m.dd)
dim(export)
#write.csv(export,'output/scenario3_DD.csv')
############################################################
## FREQUENCY DEPENDENT R0
# FD transmission rates chosen so the single-host R0 values match the DD case
beta_c = R0_c*(paramsD[['alpha']][1] + paramsD[['gamma']][1] + paramsD[['d']][1])
beta_m = R0_m*(paramsD[['alpha']][2] + paramsD[['gamma']][2] + paramsD[['d']][2])
print(c(beta_c, beta_m))
# R0; plotting within and between host R0
paramsF <- list(d = c(0.1, 0.02),
k = c(100, 100),
beta.wFD = c(beta_c, beta_m),
gamma = c(0.03, 0.05),
alpha = c(0.001, 0.01),
sigma = c(0.05, 0.05))
R0.m.fd = numeric(length(phi)) #
R0.c.fd = numeric(length(phi)) #
R0.combin.fd = data.frame(matrix(NA, nrow = length(btw), ncol = length(phi)))
# Same NGM calculation as above but with frequency-dependent F; note the
# single-host FD R0 does not depend on K or phi.
for (j in 1:length(btw)){
for (i in 1:length(phi)){
mat_F <- matrix.F.FD(paramsF[["beta.wFD"]], paramsF[["beta.wFD"]]*btw[j],
epsilon[i])
mat_V <- matrix.V(paramsF[["alpha"]], paramsF[["gamma"]], paramsF[["d"]])
mat_G <- matrix.inverse(mat_V) %*% mat_F
eigen.ngm.fd <- eigen(mat_G)
R0.combin.fd[j,i] = eigen.ngm.fd$values[[1]]
R0.c.fd[i] = paramsF[["beta.wFD"]][1]/
(paramsF[["alpha"]][1] + paramsF[["gamma"]][1] + paramsF[["d"]][1])
R0.m.fd[i] = paramsF[["beta.wFD"]][2]/
(paramsF[["alpha"]][2] + paramsF[["gamma"]][2] + paramsF[["d"]][2])
}
}
#export for plotting
dim(R0.combin.fd)
plotR0(phi, R0.c.fd, R0.m.fd, R0.combin.fd)
export = rbind(R0.combin.fd,R0.c.fd,R0.m.fd)
dim(export)
#write.csv(export,'deterministic/output/scenario3_FD.csv')
|
a939f2193eba3bbf3ea1340d6f30dfcd825e75f5
|
12854f77ea56109c7df0b4f59480589f623fe70f
|
/Logistic_Graded.R
|
9e44e36dd2672bf9f6e69307bb3f649f6867cd4e
|
[] |
no_license
|
souravbiswas1/RLog
|
0359a6eb61fe596dee1b4bab0d93d3a6b09c0ccc
|
3a0a143d683829da975d4233c0c3f7620b78532e
|
refs/heads/master
| 2020-03-19T17:04:28.454397
| 2018-06-09T17:49:28
| 2018-06-09T17:49:28
| 136,742,989
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,471
|
r
|
Logistic_Graded.R
|
# Logistic regression on consumer perception of Brand A chips:
# explore the survey data, build a binary "good perception" target,
# fit models, and score/export customers in a chosen probability band.
#Setting the path of the working directory::
# NOTE(review): machine-specific absolute path — will not run elsewhere.
setwd("E:\\Jigsaw\\Analytics with R\\Work\\Logistic_Regression")
library(dplyr)
library(gains)
library(irr)
library(caret)
#Reading the file::
gf<-read.csv("goodforu (1).csv")
#Some exploratory data analysis and findings::
summary(gf)
#Checking the no of rows & columns::
dim(gf)
#Names of the columns::
names(gf)
#Structure of the dataset::
str(gf)
#Total no of missing values::
sum(is.na(gf))
#Segregating the customers having good and bad perception upon Brand A as per the given rankings::
# Target_A = 1 when the rating in X23 exceeds 4, else 0.
gf<-gf%>%mutate(Target_A=ifelse(X23>4,1,0))
#gf<-select(gf,-X23)
#Question No.2::
Zero_count<-gf%>%filter(Target_A==0)%>%summarise(Total_Zero=n())
#making the variable numeric::
Zero_count$Total_Zero<-as.numeric(Zero_count$Total_Zero)
#Total no of rows::
Total_Count<-gf%>%summarise(Count=n())
#Percentage of score 4 or less::
Percentage<-round(((Zero_count$Total_Zero/Total_Count$Count)*100),2)
#Question No.3::
One_count<-gf%>%filter(Target_A==1)%>%summarise(Total_One=n())
print(One_count)
#Question No.10::
gf%>%filter(X2==1)%>%summarise(Total_Count=n())
#Question No.11::
gf%>%filter(X16==1)%>%summarise(Total_Count=n())
#Question No.12::
gf%>%filter(X9==1)%>%summarise(Total_Count=n())
#Question No.4::
#Data preparation for Zero Trans fat variables for Brand A::
# Recode survey answers (2 = "no") into 0/1 indicator variables.
gf$transFat_A<-ifelse(gf$X9==2,0,1)
gf$farmGrown_A<-ifelse(gf$X2==2,0,1)
gf$naturalOils_A<-ifelse(gf$X16==2,0,1)
gf$miniProc_A<-ifelse(gf$X30>4,1,0)
#Exploratory data analysis::
#Farm grown ingredients analysis:
table(gf$farmGrown_A,gf$Target_A)
#Zero gram trans fat analysis::
table(gf$transFat_A,gf$Target_A)
#Analysis with whether the chips are made with natural oils::
table(gf$naturalOils_A,gf$Target_A)
#Analysis with whether the chips are minimally processed::
table(gf$miniProc_A,gf$Target_A)
#Splitting the dataset (70/30 train/test)::
set.seed(200)
# NOTE(review): style — prefer TRUE/FALSE over T/F.
index<-sample(nrow(gf),0.70*nrow(gf),replace = F)
train<-gf[index,]
test<-gf[-index,]
#Building the model::
mod1<-glm(Target_A~transFat_A+farmGrown_A+naturalOils_A+miniProc_A,data = train,family = "binomial")
summary(mod1)
#Predicting the probability of the test dataset using the existing model::
pred<-predict(mod1,type = "response",newdata = test)
head(pred)
summary(pred)
#Finding the proportion of the good perception on Brand A according to the initial dataset::
round((One_count$Total_One/Total_Count$Count),3)
# Classify using a 0.49 probability cut-off.
test$pred<-ifelse(pred>0.49,1,0)
#The Kappa metric::
kappa2(data.frame(test$Target_A,test$pred))
#the confusion matrix::
confusionMatrix(factor(test$pred),factor(test$Target_A),positive = "1")
#Creating gain chart::
gains(test$Target_A,predict(mod1,type = "response",newdata = test),groups = 10)
test$prob<-predict(mod1,type = "response",newdata = test)
quantile(test$prob,prob=c(0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.99))
#Plotting ROC,the area under the curve::
library(pROC)
g <- roc(Target_A ~ prob, data = test)
plot(g)
#X2---Farm grown ingredients...
#X9---zero gram trans fat...
#X16---Made with natural oils...
#X30---Minimally processed...
#Question No.4,5,6,7,8::
# Model on the raw survey columns instead of the recoded indicators.
mod2<-glm(Target_A~X2+X9+X16+X30,data = gf,family = "binomial")
summary(mod2)
#-----------------------------------------------------------------------------------------------------
#Building the model on original dataset::
# NOTE(review): mod3 has the same formula and training data as mod1 — it is
# a refit, not a new model; only the scoring dataset (gf) differs below.
mod3<-glm(Target_A~transFat_A+farmGrown_A+naturalOils_A+miniProc_A,data = train,family = "binomial")
summary(mod3)
#Predicting the probability of the original dataset using the existing model::
pred<-predict(mod3,type = "response",newdata = gf)
head(pred)
summary(pred)
#Finding the proportion of the good perception on Brand A according to the initial dataset::
round((One_count$Total_One/Total_Count$Count),3)
gf$pred<-ifelse(pred>0.49,1,0)
#The Kappa metric::
kappa2(data.frame(gf$Target_A,gf$pred))
#the confusion matrix::
confusionMatrix(factor(gf$pred),factor(gf$Target_A),positive = "1")
#Creating gain chart::
gains(gf$Target_A,predict(mod3,type = "response",newdata = gf),groups = 10)
gf$prob<-predict(mod3,type = "response",newdata = gf)
quantile(gf$prob,prob=c(0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0))
#Plotting ROC,the area under the curve::
library(pROC)
h <- roc(Target_A ~ prob, data = gf)
plot(h)
#Scoring of the customer as per the perception on Brand A::
# Export customers whose predicted probability lies in (0.68, 0.81).
targeted<-gf[gf$prob>0.68 & gf$prob<0.81,"Panel.ID"]
targeted<-as.data.frame(targeted)
write.csv(targeted,"targeted.csv",row.names = F)
|
95353f1051d36a0759215d75da05dc3a2d6a7f10
|
370d5b17a744b6dc41d80f0e0492260ea32fc6ba
|
/man/noirot.contribution.Rd
|
fd0d202036f3f28e2d3ff1ef9c9859c21bf4c742
|
[
"MIT"
] |
permissive
|
keocorak/CoreComp
|
a6d0cc50a3bcc9dc2947819e682be03c7d98c2ec
|
076e5984afc28c02c91446ae22daee752788745d
|
refs/heads/main
| 2023-01-31T16:36:49.167609
| 2020-12-17T20:03:45
| 2020-12-17T20:03:45
| 318,306,525
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 789
|
rd
|
noirot.contribution.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genorep.R
\name{noirot.contribution}
\alias{noirot.contribution}
\title{Noirot Principal Component scoring}
\usage{
noirot.contribution(geno.dist.all, core.names, k = 2)
}
\arguments{
\item{geno.dist.all}{genetic distance matrix}
\item{core.names}{character vector of entry names to be included in core}
\item{k}{the maximum dimensions in which to represent data; must be less than n}
}
\value{
PC score
}
\description{
Perform multidimensional scaling of genotypic distance matrix and calculate summed relative contribution of entries in core set following Noirot et al. (1996).
}
\examples{
core<-dist.core(as.matrix(dist(fake_geno)), n.clust=9)
noirot.contribution(as.matrix(dist(fake_geno)), core$name)
}
|
76114f963b5aa073fea5c4c5d3e1769bda55260d
|
21d39115a575d6f403d6485a0744aae126d60daf
|
/R/data.R
|
e02dc009b17779690386d246c54a45bb59df46c8
|
[] |
no_license
|
nealhaddaway/discoverableresearch
|
33842b2dd14f3f69fa5e3367520b60b8e46ccfeb
|
63e6f061be0efca28a62e1d994ec828075b04040
|
refs/heads/master
| 2023-03-07T13:11:54.420346
| 2020-10-05T06:04:26
| 2020-10-05T06:04:26
| 294,333,669
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 659
|
r
|
data.R
|
#' Language codes synthesisr can recognize
#'
#' A dataset of the languages that can be recognized by
#' synthesisr along with their short form, character encoding,
#' and whether a scientific journal indexed in 'ulrich' uses them.
#'
#' @source 'litsearchr' package on 'Github'
#' @format A data frame with 53 rows of 4 variables:
#' \describe{
#'   \item{Short}{the short form language code}
#'   \item{Language}{the name of the language}
#'   \item{Encoding}{which character encoding to use for a language}
#'   \item{Used}{whether or not the language is used by a scientific journal}
#' }
#' @examples
#' \donttest{
#' possible_langs
#' }
"possible_langs"
|
96445af71d1f0f25350364836c5efac9555484f0
|
f85d09d41ee157807577a480d634ae21c92b0631
|
/SL_KNN&Boosting_ML/Amazon_Sourcecode_boosting/Amazon_Boosting.R
|
6b66aebe3df6c40c6b185c6d19cb5368861ac747
|
[] |
no_license
|
fzachariah/Machine-Learning
|
f49564fd21a7d196a9cc77066b8136d5fb74e05b
|
090b64feacca942eb56942d81059aabae4860cf5
|
refs/heads/master
| 2021-01-16T17:49:09.352659
| 2017-05-17T01:41:23
| 2017-05-17T01:41:23
| 87,450,538
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,234
|
r
|
Amazon_Boosting.R
|
# Sentiment classification of Amazon reviews: compare two boosting
# learners (C5.0 and stochastic gradient boosting) on sentiment-score
# features, using 5-fold CV repeated 3 times, then report test accuracy.
library(class)
library(caret)
library(caretEnsemble)
Amazon_train_data <- read.csv(file = "sentiment_train.csv", header = TRUE)
Amazon_train_data$rating <- as.factor(Amazon_train_data$rating)
Amazon_test_data <- read.csv(file = "sentiment_test.csv", header = TRUE)
Amazon_test_data$rating <- as.factor(Amazon_test_data$rating)
# NOTE(review): sample_data is computed but never used below — confirm
# whether training on a 35% subsample was intended.
sample_data <- sample(1:nrow(Amazon_train_data), 0.35 * nrow(Amazon_train_data))
control <- trainControl(method="repeatedcv", number=5, repeats=3)
# C5.0
model_c50 <- train(rating ~ negative_score + positive_score + neutral_score + compound_value,
data = Amazon_train_data, method = "C5.0", metric = "Accuracy", trControl = control)
# Gradient Boosting
model_gbm <- train(rating ~ negative_score + positive_score + neutral_score + compound_value,
data = Amazon_train_data, method = "gbm", metric = "Accuracy", trControl = control, verbose = FALSE)
# summarize results
boosting_results <- resamples(list(c5.0 = model_c50, gbm = model_gbm))
summary(boosting_results)
dotplot(boosting_results)
# Test-set accuracy for each model
pr.c50 <- predict(model_c50, Amazon_test_data)
mean(Amazon_test_data$rating == pr.c50)
pr.gbm <- predict(model_gbm, Amazon_test_data)
mean(Amazon_test_data$rating == pr.gbm)
|
8a5e475764db8795d8be75f8a3925316604e0bf3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Momocs/examples/coo_aligncalliper.Rd.R
|
2535a3e69154c2629de4caee7487d87c639f600a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 304
|
r
|
coo_aligncalliper.Rd.R
|
library(Momocs)
### Name: coo_aligncalliper
### Title: Aligns shapes along their 'calliper length'
### Aliases: coo_aligncalliper
### ** Examples
## Not run:
##D b <- bot[1]
##D coo_plot(b)
##D coo_plot(coo_aligncalliper(b))
##D bot.al <- coo_aligncalliper(bot)
##D stack(bot.al)
## End(Not run)
|
05071c3602ba14658e7527de2fa5d2abe4e622bb
|
96e1fa91df73ec4d9b97b25914860b7bb37c1183
|
/GeneTonic/GeneTonic-sticker.R
|
6f1bf9aecd087c6eface32e1aa50e9d8fd34e647
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain",
"CC-BY-3.0",
"CC-BY-4.0",
"CC-BY-2.0"
] |
permissive
|
Bioconductor/BiocStickers
|
52113b315b85400fb04c1325f8f3d02eab5559ea
|
1a0cba2d4041c9ef41fd29d0c4a35c62911b71dd
|
refs/heads/devel
| 2023-07-23T20:59:38.116853
| 2023-07-13T19:12:37
| 2023-07-13T19:12:37
| 83,644,497
| 118
| 107
|
NOASSERTION
| 2023-09-09T13:31:35
| 2017-03-02T06:46:55
|
R
|
UTF-8
|
R
| false
| false
| 1,043
|
r
|
GeneTonic-sticker.R
|
# Build the GeneTonic hex sticker from a pre-assembled logo image.
# to be done out of this script:
# - import svg of gin tonic
# - export to hi-res png
# - assemble in powerpoint with dna helix
# - group up and export as picture (GT_logo_full.png)
# Assembling all the pieces together --------------------------------------
library(ggplot2)
library(png)
library(grid)
library(hexSticker)
## Settings:
col_border <- "#264095" ## some nice dark blue
col_bg <- "#1A81C2" ## nice full blue
col_text <- "#FFFFFF" ## white
img_file <- ("GT_logo_full.png")
# NOTE(review): img is read but never used — sticker() below takes the file
# path directly; this readPNG() call appears to be dead code.
img <- readPNG(img_file)
sticker(img_file,
package="GeneTonic",
p_size = 7.5,
p_family = "Aller_Lt",
p_color = col_text,
s_x = 0.92,
s_y = 0.77,
s_width = 0.52,
s_height = 0.52,
h_fill = col_bg,
h_color = col_border,
h_size = 1.5,
spotlight = FALSE,
url = "www.bioconductor.org",
u_color = col_border,
filename="GeneTonic.pdf"
)
# afterwards: export to png via Preview or similar applications
|
f4353fa7e1ba697661ba9ddcc10820ca8dae7908
|
77c6fc544f7737a5317fd09e2fe78f1e99f70027
|
/Kyou-san/rgl_igraph2.R
|
0490cc172be202e22c330bed98324e9563a0ade4
|
[] |
no_license
|
ryamada22/R
|
bdbf5a01397ad4da86c271d94b97d302d426a346
|
a142edc21c5ab188d2bf531297b2e02ff2c49c15
|
refs/heads/master
| 2020-04-04T00:28:50.447554
| 2019-08-28T07:59:13
| 2019-08-28T07:59:13
| 29,048,282
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,719
|
r
|
rgl_igraph2.R
|
# Build a graph from the edges of the bunny mesh, compute shortest-path
# (graph geodesic) distances along those edges, and visualise the
# distances as colour contours on the 3D model.
library(rgl) # package for 3d object handling
# reading the bunny.obj in your fair zip
bunny <- readOBJ("bunny.obj")
library(igraph) # package for graph theory
# 3D coordinates of vertices in the shape of n x 3 matrix
V.xyz <- t(bunny[[1]][1:3,])
# Enumerate edges of triangle in n x 2 matrix shape
Edges <- rbind(t(bunny[[2]][1:2,]),t(bunny[[2]][2:3,]),t(bunny[[2]][c(3,1),]))
# Remove duplicates of edges
Edges <- unique(Edges)
# length of edges (Euclidean length used as edge weight)
Edge.len <- sqrt(apply((V.xyz[Edges[,1],] - V.xyz[Edges[,2],])^2,1,sum))
# make a graph object
g <- graph.edgelist(Edges)
# distance on the graph (shortest path weighted by edge length)
d <- distances(g,weights=Edge.len)
### Post-spherization
# Spherization maps all the vertices on the bunny on the S2 sphere
# Along the geodesics on the S2 sphere, we can "re-measure" the distance.
# The geodesics on the S2 sphere is drawn back on the bunny and
# along the geodesics, we can measure the "distance between vertices on the bunny".
# We can make the distance matrix of all vertices pairs along this back-to-the-bunny geodesics.
# Can you compare the two distance matrices; one is the distance matrix that the R codes above generates and the other is the back-to-the-bunny distance matrix.
# Showing distance from a point.
# Normalization of distance value ranging [0,1] so that you can use the values for color function rgb arguments
d. <- d/max(d)
# Coloring the vertices with distance from the n-th vertex
# Select one of (many vertices) arbitrarily
# The color of bunny should depend on the distance from the n-th vertex.
n <- 30
# rgb() function takes three [0,1] real values specifying red/green/blue
# to generate colors
# d.[n,] is the vector of normalized distance from the n-th vertex to all vertices
col <- rgb(d.[n,],1-d.[n,],1)
# Integer values >= 1 are generated depending on distance
# (binning into 16 colour bands; note the plot below uses col2, not col)
col2 <- ceiling(d.[n,] * 15)+1
# plot them
plot3d(V.xyz)
spheres3d(V.xyz,radius=0.005,col=col2)
# When you get different distance matrix
# you can replace the object d to the new distance matrix
# and draw the similar contours.
# Then, you can visually compare two distance matrices.
n <- 30
col <- rgb(d.[n,],1-d.[n,],1)
col2 <- ceiling(d.[n,] * 15)+1
# color values are 1 or 2 (alternating bands -> contour-line effect)
col3 <- col2 %% 2 + 1
plot3d(V.xyz)
spheres3d(V.xyz,radius=0.005,col=col3)
# Change the vertex, distance from which determines colors
n2 <- 400
col_ <- rgb(d.[n2,],1-d.[n2,],1)
col2_ <- ceiling(d.[n2,] * 15)+1
# color values are 1 or 2
col3_ <- col2_ %% 2 + 1
plot3d(V.xyz)
spheres3d(V.xyz,radius=0.005,col=col3_)
# combining two distance vectors from two vertices and generate color values 1 or 2
# This should make square lattice type colorings
plot3d(V.xyz)
spheres3d(V.xyz,radius=0.005,col=as.numeric(col3 == col3_)+1)
|
41203f341022a13288ec334803fb2202f5c39e9b
|
9f6c0f270a3a49493a1c405b90d363327f782898
|
/man/nnetPredInt.Rd
|
4ab206d1d272bd27a925f926675771c93e081e87
|
[] |
no_license
|
diegomerlanop/nnetpredint
|
714ff83eff6e8c8016bd98e3e13a0ee05e6794fb
|
549fd65616cefd9c042381ebe23bfacf70b81c00
|
refs/heads/master
| 2021-05-30T22:54:55.797432
| 2015-12-21T21:35:34
| 2015-12-21T21:35:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,261
|
rd
|
nnetPredInt.Rd
|
\name{nnetPredInt}
\alias{nnetPredInt}
\alias{nnetPredInt.default}
\alias{nnetPredInt.nnet}
\alias{nnetPredInt.nn}
\alias{nnetPredInt.rsnns}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
Prediction Intervals of Neural Networks
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
Get the prediction intervals of new dataset at certain confidence level based on the training datasets and the gradient at weight parameters of the neural network model.
}
\usage{
nnetPredInt(object, ...)
\method{nnetPredInt}{default}(object = NULL, xTrain, yTrain, yFit, node, wts, newData,
alpha = 0.05 , lambda = 0.5, funName = 'sigmoid', \dots)
\method{nnetPredInt}{nnet}(object, xTrain, yTrain, newData, alpha = 0.05, lambda = 0.5,
funName = 'sigmoid', \dots)
\method{nnetPredInt}{nn}(object, xTrain, yTrain, newData, alpha = 0.05, lambda = 0.5,
funName = 'sigmoid', \dots)
\method{nnetPredInt}{rsnns}(object, xTrain, yTrain, newData, alpha = 0.05, lambda = 0.5,
funName = 'sigmoid', \dots)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{
object of class: nnet as returned by 'nnet' package, nn as returned by 'neuralnet' package, rsnns as returned by 'RSNNS' package. Object set as NULL will use the default method which takes the weight parameters as the input from user.
}
\item{xTrain}{
matrix or data frame of input values for the training dataset.
}
\item{yTrain}{
vector of target values for the training dataset.
}
\item{newData}{
matrix or data frame of the prediction dataset.
}
\item{yFit}{
vector of the fitted values, as the output produced by the training model, e.g. nnet$fitted.values ('nnet') , nn$net.result[[1]] ('neuralnet') and rsnns$fitted.values ('RSNNS')
}
\item{node}{
a vector of integers specifying the number of hidden nodes in each layer. Multi-layer network has the structure (s0, s1, ..., sm), in which s0 denotes the dimension for input layer and sm denotes the dimension of the output layer. sm is usually set as 1.
}
\item{wts}{
a numeric vector of optimal weight parameters as the output of the neural network training model. The order of wts parameter is as follows: For any node i in layer k: c(bias ik, wi1k,wi2k,...wijk).
}
\item{}{
nnet object, returned by 'nnet' package. We can directly set the wts as: wts = nnet$wts
}
\item{}{
nn object, returned by 'neuralnet' package. We need to use \link{transWeightListToVect} function to transform the list of weights to a single vector first: wts = transWeightListToVect(wtsList, m).
}
\item{}{
rsnns object, returned by 'RSNNS' package. We need to transform and combine the weight and bias parameters to a single vector: weightMatrix(object) and extractNetInfo(object)$unitDefinitions$unitBias.
}
\item{alpha}{
confidence level. The confidence level is set to (1-alpha). In default, alpha = 0.05.
}
\item{lambda}{
decay parameter of weights when the Jacobian matrix of the training dataset is singular. By default, lambda is set to 0.5.
}
\item{funName}{
activation function name of neuron, e.g. 'sigmoid', 'tanh', etc. In default, it is set to 'sigmoid'.
}
\item{...}{
additional arguments passed to the method.
}
}
\value{
data frame of the prediction intervals, including prediction value, lower and upper bounds of the interval.
\item{yPredValue}{
%% ~~Describe \code{x} here~~
the column of prediction value in the data frame.
}
\item{lowerBound}{
%% ~~Describe \code{x} here~~
the column of prediction lower bounds in the data frame.
}
\item{upperBound}{
%% ~~Describe \code{x} here~~
the column of prediction upper bounds in the data frame.
}
}
\references{
%% ~put references to the literature/web site here ~
De Veaux R. D., Schumi J., Schweinsberg J., Ungar L. H., 1998, "Prediction intervals for neural networks via nonlinear regression", Technometrics 40(4): 273-282.
Chryssolouris G., Lee M., Ramsey A., "Confidence interval prediction for neural networks models", IEEE Trans. Neural Networks, 7 (1), 1996, pp. 229-232.
'neuralnet' package by Stefan Fritsch, Frauke Guenther.
'nnet' package by Brian Ripley, William Venables.
'RSNNS' package by Christoph Bergmeir, Jose M. Benitez.
}
\author{
%% ~~who you are~~
Xichen Ding <rockingdingo@gmail.com>
}
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
\code{\link{transWeightListToVect}}
\code{\link{jacobian}}
}
\examples{
# Example 1: Using the nn object trained by neuralnet package
set.seed(500)
library(MASS)
data <- Boston
maxs <- apply(data, 2, max)
mins <- apply(data, 2, min)
scaled <- as.data.frame(scale(data, center = mins, scale = maxs - mins)) # normalization
index <- sample(1:nrow(data),round(0.75*nrow(data)))
train_ <- scaled[index,]
test_ <- scaled[-index,]
library(neuralnet) # Training
n <- names(train_)
f <- as.formula(paste("medv ~", paste(n[!n \%in\% "medv"], collapse = " + ")))
nn <- neuralnet(f,data = train_,hidden = c(5,3),linear.output = FALSE)
plot(nn)
library(nnetpredint) # Getting prediction confidence interval
x <- train_[,-14]
y <- train_[,14]
newData <- test_[,-14]
# S3 generic method: Object of nn
yPredInt <- nnetPredInt(nn, x, y, newData)
print(yPredInt[1:20,])
# S3 default method for user defined weights input, without model object trained:
yFit <- c(nn$net.result[[1]])
nodeNum <- c(13,5,3,1)
m <- 3
wtsList <- nn$weights[[1]]
wts <- transWeightListToVect(wtsList,m)
yPredInt2 <- nnetPredInt(object = NULL, x, y, yFit, nodeNum, wts, newData, alpha = 0.05)
print(yPredInt2[1:20,])
# Compare to the predict values from the neuralnet Compute method
predValue <- compute(nn,newData)
print(matrix(predValue$net.result[1:20]))
# Example 2: Using the nnet object trained by nnet package
library(nnet)
xTrain <- rbind(cbind(runif(150,min = 0, max = 0.5),runif(150,min = 0, max = 0.5)) ,
cbind(runif(150,min = 0.5, max = 1),runif(150,min = 0.5, max = 1))
)
nObs <- dim(xTrain)[1]
yTrain <- 0.5 + 0.4 * sin(2* pi * xTrain \%*\% c(0.4,0.6)) +rnorm(nObs,mean = 0, sd = 0.05)
plot(xTrain \%*\% c(0.4,0.6),yTrain)
# Training nnet models
net <- nnet(yTrain ~ xTrain,size = 3, rang = 0.1,decay = 5e-4, maxit = 500)
yFit <- c(net$fitted.values)
nodeNum <- c(2,3,1)
wts <- net$wts
# New data for prediction intervals
library(nnetpredint)
newData <- cbind(seq(0,1,0.05),seq(0,1,0.05))
yTest <- 0.5 + 0.4 * sin(2* pi * newData \%*\% c(0.4,0.6))+rnorm(dim(newData)[1],
mean = 0, sd = 0.05)
# S3 generic method: Object of nnet
yPredInt <- nnetPredInt(net, xTrain, yTrain, newData)
print(yPredInt[1:20,])
# S3 default method: xTrain,yTrain,yFit,...
yPredInt2 <- nnetPredInt(object = NULL, xTrain, yTrain, yFit, node = nodeNum, wts = wts,
newData, alpha = 0.05, funName = 'sigmoid')
plot(newData \%*\% c(0.4,0.6),yTest,type = 'b')
lines(newData \%*\% c(0.4,0.6),yPredInt$yPredValue,type = 'b',col='blue')
lines(newData \%*\% c(0.4,0.6),yPredInt$lowerBound,type = 'b',col='red') # lower bound
lines(newData \%*\% c(0.4,0.6),yPredInt$upperBound,type = 'b',col='red') # upper bound
# Example 3: Using the rsnns object trained by RSNNS package
library(RSNNS)
data(iris)
#shuffle the vector
iris <- iris[sample(1:nrow(iris),length(1:nrow(iris))),1:ncol(iris)]
irisValues <- iris[,1:4]
irisTargets <- decodeClassLabels(iris[,5])[,'setosa']
iris <- splitForTrainingAndTest(irisValues, irisTargets, ratio=0.15)
iris <- normTrainingAndTestSet(iris)
model <- mlp(iris$inputsTrain, iris$targetsTrain, size=5, learnFuncParams=c(0.1),
maxit=50, inputsTest=iris$inputsTest, targetsTest=iris$targetsTest)
predictions <- predict(model,iris$inputsTest)
# Generating prediction intervals
library(nnetpredint)
# S3 Method for rsnns class prediction intervals
xTrain <- iris$inputsTrain
yTrain <- iris$targetsTrain
newData <- iris$inputsTest
yPredInt <- nnetPredInt(model, xTrain, yTrain, newData)
print(yPredInt[1:20,])
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
|
8e605fc43b401a034880328999f1c11d922c8a3a
|
33945f7d8c8dc14d102638de7ec71d1e88413013
|
/cal/radius_cross.R
|
453ba78da696fe2c5e3690f52df94153bb0605dc
|
[] |
no_license
|
wactbprot/svol
|
9c483a87969cc5eddec68e6c5be8a2b60bad0e9e
|
57db9658fbd5b253bced0e7fa66471d79115364f
|
refs/heads/master
| 2021-01-18T14:05:39.453305
| 2015-02-05T12:16:08
| 2015-02-05T12:16:08
| 29,733,995
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 940
|
r
|
radius_cross.R
|
## radius_cross.R
## Cross-section analysis of valve scan data: converts (x, y, z) point
## coordinates to cylindrical radius r = sqrt(x^2 + y^2) and highlights
## points falling inside the radial band [brs, bre].

t1 <- read.table("data/ventil-sitz_1.txt"
               , skip=2
               , sep=" "
               , row.names=NULL)
## row indices of the individual scans (it4/it5 currently unused)
it1 <- which(t1[,1] == "SCN1")
it4 <- which(t1[,1] == "SCN4")
it5 <- which(t1[,1] == "SCN5")
mt <- as.matrix(t1[,3:5])
## cylindrical radius from the x/y columns; z is the height column
r <- sqrt(mt[, 1]^2 + mt[, 2]^2)
z <- mt[, 3]
## radial band of interest
brs <- 8.25
bre <- 8.75
## slope is not reached (original note: "steigung wird nicht erreicht")
i <- which(r > brs & r < bre)
plot(r,z)
points(r[i], z[i], col=2)

v2 <- read.table("data/ventil-kreis_2.txt"
               , skip=2
               , sep=" "
               , row.names=NULL)
par(mfrow=c(3,1))
## radius alignment does not help either
## (original note: "Radius Abgleich bringt auch nichts")
sn <- c("SCN1","SCN3","SCN4","SCN5")
is1 <- which(v2[,1] == sn[1])
is2 <- which(v2[,1] == sn[2])
is3 <- which(v2[,1] == sn[3])
is4 <- which(v2[,1] == sn[4])
mt <- as.matrix(v2[,3:5])
r <- sqrt(mt[, 1]^2 + mt[, 2]^2)
z <- mt[, 3]
rs1 <- r[is1]
zs1 <- z[is1]
## BUG FIX: the band index must be computed on the SCN1 subset (rs1), not on
## the full radius vector r -- indices taken from the full vector do not
## correspond to the same points and can exceed length(rs1).
i <- which(rs1 > brs & rs1 < bre)
plot(rs1, zs1)
points(rs1[i], zs1[i], col=2)
|
a74370aa4b6231c0d7e5f88dcaf1e4de4193d353
|
6e6202e97b13bead3f40ab7a141c2bc4fe8e9345
|
/sr-ch8.R
|
db73f431a5d0253d05e6a513b448acdbcbd5ae61
|
[] |
no_license
|
loudermilk/bayesian-stats
|
24b074d9b3775a2e193acb509c66b8ba3550417b
|
ca9840314183423e15ee80666b97fac43ee0b4d1
|
refs/heads/master
| 2021-01-09T20:13:57.391662
| 2016-09-19T12:46:39
| 2016-09-19T12:46:39
| 62,754,526
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,795
|
r
|
sr-ch8.R
|
## sr-ch8.R
## Chapter 8 - Markov Chain Monte Carlo

## 8.1 Good King Markov and His Island Kingdom
## Metropolis walk over 10 islands arranged in a ring: propose a move to a
## neighbouring island and accept it with probability proposal/current
## (island number stands in for population size).
num_weeks <- 1e5
current <- 10
positions <- rep(0, num_weeks)
for (i in seq_len(num_weeks)) {
  # record where the king is this week
  positions[i] <- current
  # propose one of the two neighbouring islands, wrapping around the ring
  proposal <- current + sample(c(-1, 1), size = 1)
  if (proposal < 1) proposal <- 10
  if (proposal > 10) proposal <- 1
  # accept with probability min(1, proposal/current); scalar decision, so a
  # plain if/else is preferred over ifelse() here
  prob_move <- proposal / current
  current <- if (runif(1) < prob_move) proposal else current
}
## 8.2.1 Gibbs Sampling
library(rethinking)
data(rugged)
d <- rugged
# outcome: log GDP per capita (year 2000)
d$log_gdp <- log(d$rgdppc_2000)
# keep only countries with a GDP observation
dd <- d[complete.cases(d$rgdppc_2000),]
names(dd)
# quadratic-approximation (MAP) fit of the ruggedness x Africa interaction
m8.1 <- map(
  alist(
    log_gdp ~ dnorm(mu, sigma),
    mu <- a + bR*rugged + bA*cont_africa + bAR*rugged*cont_africa,
    a ~ dnorm(0,100),
    c(bR, bA, bAR) ~ dnorm(0,10),
    sigma ~ dunif(0,10)
  ), data = dd
)
precis(m8.1)
## 8.3.1 Preparation.
## Fit the model using Hamiltonian Monte Carlo
## (1) preprocess all variable transformations (incl in df)
## (2) reduce df to only contain variables of interest
names(dd)
dd.trim <- dd[, c("rugged", "cont_africa", "log_gdp")]
str(dd.trim)

## 8.3.2 Estimation
## BUG FIX: the likelihood line must use `~` (stochastic relation), not `<-`
## (deterministic assignment) -- compare the identical map() model m8.1 above.
m8.1stan <- map2stan(
  alist(
    log_gdp ~ dnorm(mu, sigma),
    mu <- a + bR*rugged + bA*cont_africa + bAR*rugged*cont_africa,
    a ~ dnorm(0,100),
    c(bR, bA, bAR) ~ dnorm(0,10),
    sigma ~ dcauchy(0,2)
  ), data = dd.trim
)
precis(m8.1stan)
## n_eff - crude estimate of the number of independent samples you got
## Rhat - estimate of the convergence of the Markov chains to the target dist
## Rhat should approach 1.00 from above if all is well

## 8.3.3 Sampling again in parallel
## Using an existing stan model you can draw more samples from it, running
## as many independent Markov chains as you want.
m8.1stan_4chains <- map2stan(m8.1stan, chains = 4, cores = 4)
precis(m8.1stan_4chains)
## 8.3.4 Visualization
# posterior samples as a named list, one element per parameter
post <- extract.samples(m8.1stan)
class(post)
str(post)
length(post)
pairs(post)
pairs(m8.1stan)
## 8.3.5 Using the samples
show(m8.1stan)
## 8.3.6 Checking the chain
## Causes and solutions for malfunctions
## TRACE PLOT - plots the samples in sequential order joined by a line
plot(m8.1stan)
## Diagnosis - interpret the plot for a good chain:
## (1) stationarity - does the path stay within the posterior distribution?
## (2) well-mixed - no correlation with previous event -- zigzag is good
## Grey marks the adaptation phase - learning to more efficiently sample
## from the posterior distribution (thus not necessarily reliable for inference)
## extract.samples only returns those in the white region
## 8.4 Care and feeding of your Markov chain
## 8.4.1 How many samples do you need?
## iter = 2000; warmup = iter/2
## 8.4.2 How many chains do you need?
## (1) when debugging use a single chain
## (2) when deciding whether chains are valid you need more than one chain
## (3) when you begin final run to make inferences from you need one chain
## motto: four short chains to check, one long chain for inference
## 8.4.3 Taming a wild chain
## one problem w some models is that there are broad flat regions of the
## posterior density (typically caused by using flat priors). This can generate
## a wild wandering markov chain that erratically samples extremely positive and
## extremely negative parameter values.
# two data points, no priors: deliberately pathological example
y <- c(-1,1)
map8.2 <- map2stan(
  alist(
    y ~ dnorm(mu, sigma),
    mu <- alpha
  ), data=list(y=y), start=list(alpha=0, sigma = 1), chains = 2, iter = 4000, warmup = 1000)
precis(map8.2)
plot(map8.2)
## Tame the chain by using weakly informative priors
## Flat priors say that every possible value of the parameter is equally
## plausible apriori.
# same data, but with weakly informative priors on alpha and sigma
y <- c(-1,1)
m8.3 <- map2stan(
  alist(
    y ~ dnorm(mu, sigma),
    mu <- alpha,
    alpha ~ dnorm(0,10),
    sigma ~ dcauchy(0,1)
  ), data=list(y=y), start=list(alpha=0, sigma = 1), chains = 2, iter = 4000, warmup = 1000)
precis(m8.3)
plot(m8.3)
## 8.4.4 Non-identifiable parameters
## construct a non-identifiable model
y <- rnorm(100, mean = 0, sd = 1)
# only the sum a1 + a2 is identified; no priors on a1/a2 on purpose
m8.4 <- map2stan(alist(
  y ~ dnorm(mu, sigma),
  mu <- a1 + a2,
  sigma ~ dcauchy(0,1)
), data=list(y=y), start=list(a1=0,a2=0,sigma=1), chains=2, iter=4000, warmup=1000
)
## ^^ contains two parameters a1 & a2 that cannot be identified
precis(m8.4)
plot(m8.4)
## weak priors to the rescue!!!
m8.5 <- map2stan(
  alist(
    y ~ dnorm(mu, sigma),
    mu <- a1 + a2,
    c(a1,a2) ~ dnorm(0,10),
    sigma ~ dcauchy(0,1)
  ), data=list(y=y), start=list(a1=0,a2=0,sigma=1), chains=2, iter=4000, warmup=1000
)
precis(m8.5)
plot(m8.5)
## 8H1. Run the model below and then inspect the posterior distribution and
## explain what it is accomplishing.
# no likelihood: samples the joint prior of a (normal) and b (Cauchy)
mp <- map2stan(
  alist(
    a ~ dnorm(0,1),
    b ~ dcauchy(0,1)
  ),
  data = list(y=1),
  start = list(a=0,b=0),
  iter = 1e4, warmup = 1000, WAIC=FALSE
)
precis(mp)
plot(mp)
|
db6f2e8590f82157aff6dacf0e1f34b6f3c30c85
|
3df31271dd49218652e1c654df9caeaaa22c5a26
|
/R/plot.R
|
0ab79baee0a65fadbb163339eae95eb2d9998827
|
[] |
no_license
|
lucasns/cleandata
|
adf38ec5de4565372503582c71e719b6b28d2d1e
|
c4b2d9095d60364c324d0c1a471dea697a3bfdc8
|
refs/heads/master
| 2021-06-19T07:55:46.530939
| 2017-07-07T12:34:43
| 2017-07-07T12:34:43
| 93,980,566
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,359
|
r
|
plot.R
|
#' Draw a univariate plot (scatter, histogram or boxplot) of one column.
#'
#' @param dataset A data.frame (or NULL, in which case nothing is drawn).
#' @param var Character name of the column to plot.
#' @param type Plot type: "plot", "histogram" (default) or "boxplot".
#' @param modifier Optional name of a transformation applied via
#'   apply_modifier() (project helper defined elsewhere -- TODO confirm its
#'   contract); also used to label the axis as "modifier(var)".
#' @param rm_na If TRUE (default), rows where `var` is NA are dropped first.
#' @return TRUE on success, FALSE if the plot function raised an error
#'   (the error is printed); NULL when `dataset` is NULL.
plot_univar <- function(dataset, var, type = "histogram", modifier = NULL, rm_na = TRUE) {
  if (is.null(dataset)) {
    return()
  }
  if (rm_na) {
    # keep only rows with a non-missing value for the plotted variable
    dataset <- dataset[!is.na(dataset[[var]]), ]
  }

  x <- dataset[[var]]
  if (!is.null(modifier)) {
    x <- apply_modifier(x, modifier)
    var_name <- paste(modifier, paste0("(", var, ")"))
  } else {
    var_name <- var
  }

  # Dispatch on plot type; an unknown `type` yields NULL, which then fails
  # inside tryCatch below and is reported as FALSE (original contract).
  plot_func <- switch(type,
    plot = function(x, var_name) {
      plot(x, main = NULL, ylab = var_name)
    },
    histogram = function(x, var_name) {
      hist(x, main = NULL, xlab = var_name)
    },
    boxplot = function(x, var_name) {
      boxplot(x, main = NULL, xlab = var_name, ylab = "Value")
    }
  )

  tryCatch({
    plot_func(x, var_name)
    return(TRUE)
  }, error = function(cond) {
    print(cond)
  })
  FALSE
}
#' Draw a bivariate plot of two columns of a data frame.
#'
#' @param dataset A data.frame (or NULL, in which case nothing is drawn).
#' @param var1 Character name of the x column.
#' @param var2 Character name of the y column.
#' @param type Plot type: "bvboxplot" (asbio::bv.boxplot, default),
#'   "bagplot" (aplpack::bagplot) or "plot" (base scatter plot).
#' @param modifier Optional name of a transformation applied to both
#'   variables via apply_modifier() (project helper defined elsewhere --
#'   TODO confirm its contract); non-finite transformed values are dropped
#'   and the axis labels become "modifier(var)".
#' @param rm_na If TRUE (default), rows with NA in either column are
#'   dropped first.
#' @return TRUE on success, FALSE if the plot function raised an error
#'   (the error is printed); NULL when `dataset` is NULL.
plot_bivar <- function(dataset, var1, var2, type = "bvboxplot", modifier = NULL, rm_na = TRUE) {
  if (is.null(dataset)) {
    return()
  }
  if (rm_na) {
    # drop rows where either coordinate is missing
    dataset <- dataset[!(is.na(dataset[[var1]]) | is.na(dataset[[var2]])), ]
  }

  x <- dataset[[var1]]
  y <- dataset[[var2]]
  if (!is.null(modifier)) {
    x <- apply_modifier(x, modifier)
    y <- apply_modifier(y, modifier)
    # the transformation may produce Inf/NaN (e.g. log of 0); keep only
    # rows where both transformed values are finite
    aux_df <- data.frame(x, y)
    aux_df <- aux_df[is.finite(aux_df$x) & is.finite(aux_df$y), ]
    x <- aux_df[["x"]]
    y <- aux_df[["y"]]
    xl <- paste(modifier, paste0("(", var1, ")"))
    yl <- paste(modifier, paste0("(", var2, ")"))
  } else {
    xl <- var1
    yl <- var2
  }

  # Dispatch on plot type; an unknown `type` yields NULL, which then fails
  # inside tryCatch below and is reported as FALSE (original contract).
  plot_func <- switch(type,
    bvboxplot = function(x, y, xlab, ylab) {
      asbio::bv.boxplot(x, y, bg = 'blue', bg.out = 'red', xlab = xlab, ylab = ylab)
    },
    bagplot = function(x, y, xlab, ylab) {
      aplpack::bagplot(x, y, xlab = xlab, ylab = ylab)
    },
    plot = plot
  )

  tryCatch({
    plot_func(x, y, xlab = xl, ylab = yl)
    return(TRUE)
  }, error = function(cond) {
    print(cond)
  })
  FALSE
}
|
e366248e8eaf634c9c65688a675035cdf5c64dfc
|
4636b573fdf11a69a243f67977564b2a5da8b8bf
|
/Chapter 39 Use API-wrapping packages.R
|
d4bfb63c4ffb4de3024e146da53d3d431843acbc
|
[] |
no_license
|
yuan1615/STAT545
|
16930c49fd6d5da94065b9252b381fdc7e54b285
|
0fa0ecf7c964063cab1c9e4199805ddc26162fe4
|
refs/heads/master
| 2020-08-14T13:16:20.129132
| 2019-10-24T12:20:07
| 2019-10-24T12:20:07
| 215,175,286
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,029
|
r
|
Chapter 39 Use API-wrapping packages.R
|
# Key points
# Public API interfaces
# rebird: retrieve bird sighting information
# geonames: retrieve place-name information
# rplos: query the Public Library of Science
##### Chapter 39 Use API-wrapping packages #####

#------ 39.1 Introduction ------
# Four ways of obtaining data from the internet
#
# Click-and-download - "flat" files on the internet, e.g. CSV, XLS.
# Install-and-play - an API for which someone has written a convenience R package.
# API query - published as an unwrapped API.
# Scraping - implicit in an HTML website.

#------ 39.2 Click-and-Download ------
# downloader::download() for SSL.
# curl::curl() for SSL.
# httr::GET - data read this way needs to be parsed later with read.table().
# rio::import() can "read a variety of common data formats directly from an
# https:// URL". Isn't that rather similar to the above?
#

#------ 39.3 Data supplied on the web -----
# API

#------ 39.4 Install-and-play ------
# Many common web services and APIs have been "wrapped", i.e. R functions
# have been written around them that send queries to the server and format
# the response.

#---- 39.4.1 Load the tidyverse ----
library(tidyverse)

#---- 39.4.2 Sightings of birds: rebird ----
# rebird is an R interface to the eBird database
library(rebird)

#-- 39.4.2.1 Search birds by geography --
# You need to register your own API key; understanding the idea is enough here.
# Comparable to the Tushare API for stock data!
# ebirdregion(loc = "L261851") %>%
#   head() %>%
#   kable()

#---- 39.4.3 Searching geographic info: geonames ----
# rOpenSci has a package called geonames for accessing the GeoNames API
library(geonames)
# francedata <- countryInfo %>%
#   filter(countryName == "France")

#---- 39.4.4 Wikipedia searching ----
# search using Wikipedia
# rio_english <- GNfindNearbyWikipedia(lat = -22.9083, lng = -43.1964,
#                                      radius = 20, lang = "en", maxRows = 500)
# rio_portuguese <- GNfindNearbyWikipedia(lat = -22.9083, lng = -43.1964,
#                                         radius = 20, lang = "pt", maxRows = 500)

#---- 39.4.5 Searching the Public Library of Science: rplos ----
library(rplos)
|
d56486b605b4f0c3cb435665cc185314fdcd381f
|
de0c103492d5c14cb74c32a60cd9642ec8a5e358
|
/Sequences_sunburst_files/script.r
|
0dfe0b9e7099d559999b2260e3eb58904d50beb2
|
[] |
no_license
|
jpiscionere/jpiscionere.github.io
|
bca0701b1702938ab474fa4ab1b960c1379adee9
|
f258ade7ae36ed2fffa0f538109988037d3ab051
|
refs/heads/master
| 2021-07-12T09:46:33.970444
| 2021-06-27T23:54:04
| 2021-06-27T23:54:04
| 45,433,187
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,013
|
r
|
script.r
|
# Build a sunburst-style sequence table from career-survey data: each
# respondent becomes one "-"-separated string of gender, ethnicity, grad
# status and positions 1-4; the frequency table of those strings is
# written to disk.

data <- read.csv("survey_data.csv")
summary(data)

genders <- data$What.Gender.Do.You.Identify.As.
races <- data$How.Would.You.Identify.Your.Ethnicity.
graduates <- data$Are.You.A.Graduate.Student
first <- data$What.Best.Describes.Your.First.Position.Out.of.Grad.School
second <- data$What.Best.Describes.Your.Second.Position.Out.of.Grad.School.2
third <- data$What.Best.Describes.Your.Third.Position.Out.of.Grad.School.1
fourth <- data$What.Best.Describes.Your.Fourth.Position.Out.of.Grad.School.1
fifth <- data$What.Best.Describes.Your.Fifth.Position.Out.of.Grad.School.1

df <- data.frame(genders, races, graduates, first, second, third, fourth, fifth)
data_matrix <- as.matrix(df)

# BUG FIX: the original indexed into an object `a` that was never created
# (`a[1:length(genders)] <- ...` fails with "object 'a' not found").
# paste() is vectorized over rows, so the sequence strings can be built in
# one call with the same separators as before (columns 1-7, as in the
# original; the fifth-position column is not part of the string).
a <- paste(data_matrix[, 1], "-", data_matrix[, 2], "-", data_matrix[, 3], "-",
           data_matrix[, 4], "-", data_matrix[, 5], "-", data_matrix[, 6], "-",
           data_matrix[, 7])

table(a)
df <- data.frame(table(a))
write.table(df, file = "output_third.txt")
|
5c4dce20b2c33a0711f36305739a666c7fa04355
|
c84dd226cf9f7fc21a205f1e9412e92eca7a4b93
|
/R/zzz.R
|
fa40d33ff9b01e417fb51857e044537a67553033
|
[] |
no_license
|
jrminter/minterbrand
|
14f839c17428bb656cdc0d630b882808b1140248
|
17c9422134ceb817d900aed0232fbfdb898d3869
|
refs/heads/master
| 2021-05-04T23:23:44.781313
| 2018-02-12T05:22:09
| 2018-02-12T05:22:09
| 120,148,977
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 90
|
r
|
zzz.R
|
# Attach-time hook: loads companion packages whenever this package is
# attached via library().
# NOTE(review): "Writing R Extensions" discourages calling library() from
# .onAttach; declaring these packages in DESCRIPTION (Depends/Imports) is
# the documented mechanism -- confirm before changing, since downstream
# scripts may rely on the current attach behaviour.
.onAttach <- function(...) {
  library(tidyverse)
  library(knitr)
  library(rmarkdown)
}
|
17070a5a059e1d710089db77927a8072f5920237
|
9163d726e145d9e1a36ef8d164dc0312d880616c
|
/PassiveData2016_Workflow.R
|
63814ed61c879b17f94c420efc5f6d6cfe6a0606
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
lukeloken/passive_tox
|
fca7b9804a929d7753110386ef99dbf362500382
|
45b75bcd307dbdd490ab529df1968c196ce0cd32
|
refs/heads/master
| 2021-10-25T01:53:10.100918
| 2021-10-19T19:48:27
| 2021-10-19T19:48:27
| 232,613,428
| 0
| 0
| null | 2020-01-08T17:00:12
| 2020-01-08T17:00:11
| null |
UTF-8
|
R
| false
| false
| 2,356
|
r
|
PassiveData2016_Workflow.R
|
# Workflow for analyzing passive sampler pesticide data
# Data are from 15 streams flowing into the Great Lakes
# Merge data with Sam Oliver's paper
# Luke Loken
# January 2020
# path_to_data <- c('C:/Users/lloken/OneDrive - DOI/GLRI_Pesticides')
path_to_data <- c('C:/Users/lloken/DOI/Corsi, Steven R - GLRI CECs/2016/Manuscripts/Pesticides_passive')
#load libraries
library(gridExtra)
library(RColorBrewer)
library(drake)
library(tidyverse)
library(googledrive)
library(readxl)
library(data.table)
library(toxEval)
library(ToxMixtures)
library(openxlsx)
library(dataRetrieval)
library(dplyr)
library(grid)
library(gridExtra)
#load custom functions
source('R/functions/g_legend.R')
source("custm_plot_tox_stacks.R")
# source('R/functions/ScaleYLog10Nice.R')
#Site order for plotting
site_order <- c("St. Louis", "Bad",
"Fox", "Milwaukee", "Indiana Harbor",
"St. Joseph", "Grand", "Saginaw",
"Clinton", "Rouge", "Maumee",
"Vermilion", "Cuyahoga", "Genesee", "Oswego")
#Load passive sampler data and generate toxEval file
#Saves toxeval file (excel) for select data, all data, and TQ benchmarks data
source('passive_data_setup_2016.R')
#Evaluate toxicity using ToxEval for passive data
#Loads two objects into environment (chemicalSummary and chemicalSummary_allpocis)
source('ToxEval_passive2016.R')
#Lots of figures to assess EAR for chemicals individually
source('Plot_PassiveTox_2016_includeUnknowns.R')
#Make map figure of EAR, number of chemicals detected with watersheds colored by % ag
source('map_sites_passive_2016.R')
#Scatterplots of land use versus chemical detections
source('R/Plot_scatterplots_perAg_chemicals.R')
#Evaluate toxicity using ToxEval for water samples data
source('ToxEval_watersamples2016.R')
#Plot number of chemicals by site, and number of sites by chemical barplots
# source('Plot_PassiveTox_2016.R')
#Plot number of chemicals by site, and number of sites by chemical barplots for water samples
# source('Plot_SurfaceTox_2016.R')
#Combine water and passive samples and compare.
# source('Compare_Plot_Passive_V_WaterSample.R')
#Script to analyze and plot mixtures passive
source('explore_toxmixtures_passive_2016_short.R')
#Script to analyze and plot mixtures surface water samples
source('explore_toxmixtures_surface_2016_short.R')
|
14846df8cac558e032cbdcf39930febaa4dd977a
|
6d2d195c56c7d123f1b5c36e99efa62c0e6ea754
|
/man/ehss.Rd
|
b27a2ee475ccc763bdc2bb6bc919a022049f3132
|
[] |
no_license
|
DKFZ-biostats/ESS
|
8502a6338c39a66246471f3651a6872c5f121af0
|
28b1b88662cb4d354a411e595e6e26bc9bed7ec3
|
refs/heads/master
| 2023-03-16T14:20:27.707341
| 2019-06-19T09:55:36
| 2019-06-19T09:55:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,477
|
rd
|
ehss.Rd
|
\name{ehss}
\alias{ehss}
\alias{ehss.normMix}
\alias{ehss.betaMix}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Compute Effective Historical Sample Size (EHSS)
}
\description{
Compute the Effective Historical Sample Size (EHSS): the prior effective sample size evaluated on the posterior, minus the sample size of the data; see also Wiesenfarth and Calderazzo (2019).
}
\usage{
\method{ehss}{normMix}(prior, data, n, m, se, method = c("mix.moment", "moment", "morita"), ...)
\method{ehss}{betaMix}(prior, data, n, r, method = c("mix.moment", "moment", "morita"), ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{prior}{
An RBesT mixture object
}
\item{data}{
individual data as in \code{\link[RBesT]{postmix}}. If the individual data is not given, then summary data has to be provided
}
\item{n}{
sample size}
\item{r}{
number of successes
}
\item{m}{
sample mean
}
\item{se}{
sample standard error
}
\item{method}{
Selects the used method. Can be either mix.moment, moment or morita.
%% ~~Describe \code{method} here~~
}
\item{\dots}{
%% ~~Describe \code{\dots} here~~
}
}
\details{
Simply applies \code{\link{ess}} to the posterior and subtracts the sample size.
}
%\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
%}
\references{
Wiesenfarth, M., Calderazzo, S. (2019). Quantification of Prior Impact in Terms of Effective Current Sample Size. Submitted.
}
\author{
Manuel Wiesenfarth
}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\link[RBesT]{ess}, \link{ecss}
}
\examples{
######################
# Normal Outcome
# standard deviation
sigma=1
# baseline
rob=c(0,10)
vague <-mixnorm(vague=c(1, rob), sigma=sigma)
# prior with nominal EHSS=50
inf=c(0,1/sqrt(50))
info <-mixnorm(informative=c(1, inf), sigma=sigma)
# robust mixture
mix50 <-mixnorm(informative=c(.5, inf),vague=c(.5, rob), sigma=sigma)
m=.2 #data mean
n=100 # sample size
ehss(as.powerprior(info),m=m,n=n,se=sigma/sqrt(n))
ehss(mix50,m=m,n=n,se=sigma/sqrt(n),method="morita")
ehss(mix50,m=m,n=n,se=sigma/sqrt(n),method="moment")
ehss(mix50,m=m,n=n,se=sigma/sqrt(n),method="mix.moment")
}
|
5d9e01569be8276ca2c59c70ca73b23bc275cf16
|
862f7f896467575c5c16fe7cdff9eed2dcf49df9
|
/binder/install_.R
|
1f77133fa634e66ecfee3a0650fa37bf8e1c9ac0
|
[] |
no_license
|
schmudde/ptm
|
480c7a4e63dee1cdf00474fbadb94f0f009baafe
|
5888b65f0f8fcb51320e2f63dd0e1716867b711f
|
refs/heads/master
| 2020-06-01T00:54:02.480967
| 2019-04-02T16:56:02
| 2019-04-02T16:56:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 566
|
r
|
install_.R
|
# Install the packages this project depends on.
# install.packages() accepts a character vector, so one call replaces the
# sixteen repeated calls without changing what gets installed.
pkgs <- c(
  "ggplot2", "magrittr", "dplyr", "wordcloud", "tm", "png",
  "data.table", "reshape2", "igraph", "scales", "lda", "LDAvis",
  "topicmodels", "rJava", "openNLP", "RColorBrewer"
)
install.packages(pkgs)

# library() errors immediately if devtools is missing; require() (used in
# the original) only warns and returns FALSE, so a failure here would have
# surfaced later as a confusing "could not find function" error.
library(devtools)
install_version("quanteda", version = "1.3.4",
                repos = "http://cran.us.r-project.org", dependencies = TRUE)
|
b876cf32f911e86b9f163e18a75c29e8b50c8c39
|
312d11a6dd935ba3ea61f5869413db38909731de
|
/sentiment_analysis.R
|
412def7dd4280864c8fb10c35dcc569c20bcd3b8
|
[] |
no_license
|
MarauderPixie/broken__spotify
|
e7190e54b776bb3de20fadafcf450b6a398ac158
|
f78b5ea48a9ac0e0d1183f5066bc3ce1253c7d72
|
refs/heads/master
| 2021-07-12T07:51:13.603452
| 2017-10-03T20:57:59
| 2017-10-03T20:57:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,795
|
r
|
sentiment_analysis.R
|
# merge spotify and genius data
## feels inefficient, maybe think about that later
# Join the Spotify top-50 table with the Genius lyrics table on a
# case-insensitive track name.
top50l_filtered <- filter(top50l, str_to_lower(Track) %in% str_to_lower(lyrics$Track))
top50l_filtered$lower_track <- str_to_lower(top50l_filtered$Track)
lyrics$lower_track <- str_to_lower(lyrics$Track)
lyrics_reduced <- lyrics %>% select(lyrics, lower_track)
semi_final <- inner_join(top50l_filtered, lyrics_reduced, by = "lower_track") %>% select(-lower_track)

## on to sentiment analysis!
# One indicator table per NRC emotion. TRUE is spelled out rather than T,
# since T/F are ordinary (reassignable) bindings in R.
sad <- sentiments %>%
  filter(lexicon == 'nrc', sentiment == 'sadness') %>%
  select(word) %>%
  mutate(sad = TRUE)

angry <- sentiments %>%
  filter(lexicon == 'nrc', sentiment == 'anger') %>%
  select(word) %>%
  mutate(angry = TRUE)

joy <- sentiments %>%
  filter(lexicon == 'nrc', sentiment == 'joy') %>%
  select(word) %>%
  mutate(joy = TRUE)

# magic
# Tokenize lyrics, drop stop words, tag each word with the three emotion
# indicators, then compute per-track word counts and emotion shares.
sentimentals <- semi_final %>%
  unnest_tokens(word, lyrics) %>%
  anti_join(stop_words, by = 'word') %>%
  left_join(sad, by = 'word') %>%
  left_join(angry, by = 'word') %>%
  left_join(joy, by = 'word') %>%
  group_by(Track) %>%
  summarise(
    words = n(),
    Sadness = round(sum(sad, na.rm = TRUE) / n(), 4),
    Joy = round(sum(joy, na.rm = TRUE) / n(), 4),
    Anger = round(sum(angry, na.rm = TRUE) / n(), 4)
  ) %>%
  ungroup()

# ok, cool. need to merge now. /sigh
final_l <- inner_join(semi_final, sentimentals, by = "Track") %>%
  mutate(
    Lyrical_density = words / Dur_sec,
    Gloom_Index = ((1 - Valence) + Sadness * (1 + Lyrical_density)) / 2,
    # these two are not quite right...
    Contend_Index = ((1 - Danceability) + Joy * (1 + Lyrical_density)) / 2,
    Volatile_Index = ((1 - Energy) + Anger * (1 + Lyrical_density)) / 2
  )

# cleanup
rm(angry, joy, lyrics, lyrics_reduced, sad, semi_final, sentimentals, top50l, top50l_filtered)
|
695ef90954c3923926477be81da276991c4f42fc
|
4e05a0199c0eb916244d79879d0f46079774d75f
|
/R/Mo_1.R
|
83e73d568b33f66c8dc1f988e5475e830d1bcff6
|
[] |
no_license
|
ilangurudev/datathon_citadel
|
8adcd0b3001d1a4eb812866889490cb813a8aebb
|
862674af703501f4bacbb80b6adf8e70bf311f3e
|
refs/heads/master
| 2021-05-01T22:02:16.508974
| 2018-02-10T21:11:29
| 2018-02-10T21:11:29
| 120,984,554
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 675
|
r
|
Mo_1.R
|
# Mo 1
# Exploratory preparation of NYC green-taxi trip data joined with weather.

# NOTE(review): nybb.csv is read with no header/sep arguments and boroData
# is never used below -- confirm whether this line is still needed.
boroData <- read.table("nybb.csv", skip = 1)

#load raw data
weatherData <- read.table("data/weather.csv", header = TRUE, sep = ",")
green <- read.table("data/green_trips_new_2.csv", header = TRUE, sep = ",")

# Combined data of rides and weather (first 2190 trip rows, columns 1,3,4,7)
combined <- as.data.frame(c(green[1:2190, c(1, 3, 4, 7)], weatherData[, 1:7]))

# library() errors immediately if xgboost is missing; require() (used in
# the original) only warns and returns FALSE.
library(xgboost)

# precipitation range
precip_range <- range(combined["precipitation"], na.rm = TRUE)
# 1 where both pickup_datetime and precipitation are non-missing, else 0
precipData <- as.numeric(complete.cases(combined[c("pickup_datetime", "precipitation")]))

# NOTE(review): `breaks` is defined but unused -- cut() below builds its own
# breaks from precip_range. Confirm which binning was intended.
breaks <- c(0, seq(0.5, 1, by = .1))
precip_level <- cut(precipData, breaks = seq(from = precip_range[1], to = precip_range[2], length.out = 10))
|
226b10ab1d30864453ed81aa6bdc281c52eeccb2
|
97e3baa62b35f2db23dcc7f386ed73cd384f2805
|
/man/tr.Rd
|
f3db8e6b2fa18ca135b64f78a21b0a4812cf4399
|
[] |
no_license
|
conservation-decisions/smsPOMDP
|
a62c9294fed81fcecc4782ac440eb90a299bca44
|
48b6ed71bdc7b2cb968dc36cd8b2f18f0e48b466
|
refs/heads/master
| 2021-06-25T22:23:31.827056
| 2020-10-27T08:56:07
| 2020-10-27T08:56:07
| 161,746,931
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,637
|
rd
|
tr.Rd
|
\name{tr}
\alias{tr}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Transition matrix function
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
Computes the transition matrix between states for each action : manage, survey and stop. State 1 : extant, state 2 : extinct
}
\usage{
tr(p0, pm, d0, dm, ds, V, Cm, Cs)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{p0}{
Local probability of persistence: P(extant/extant, survey or stop).
}
\item{pm}{
Local probability of persistence if managed: P(extant/extant, manage).
}
\item{d0}{
Local probability of detection : P(present/extant, stop).
}
\item{dm}{
Local probability of detection : P(present/extant, manage).
}
\item{ds}{
Local probability of detection if survey : P(present/extant, survey).
}
\item{V}{
%% ~~Describe \code{x} here~~
Estimated economic value of the species ($/yr).
}
\item{Cm}{
%% ~~Describe \code{x} here~~
Estimated cost of managing ($/yr).
}
\item{Cs}{
%% ~~Describe \code{x} here~~
Estimated cost of survey ($/yr).
}
}
\value{
%% ~Describe the value returned
Array, dimensions = [2,2,3], one matrix of dimensions 2,2 per action (manage, survey, stop, in that order)
}
\author{
Luz Pascal
}
\examples{
\dontrun{
#values for Sumatran tigers
pen <- 0.1
p0 <- 1-pen
pem <- 0.05816
pm <- 1 - pem
V <- 175.133
Cm <- 18.784
Cs <- 10.840
d0 <- 0.01
dm <- 0.01
ds <- 0.78193
#building the matrices of the problem
t <- smsPOMDP::tr(p0, pm, d0, dm, ds, V, Cm, Cs) #transition matrix
}
}
|
373e7b0d4bb36403f81f43d11e8cea2eb2d7eba5
|
1a619138a56d4cafd5ea7d2326b8b2be9a5a51f7
|
/WebScraping_Project.R
|
70601c3a8665c0885d780ecb38441e64dd8e3be5
|
[] |
no_license
|
elenayang528/Shiny
|
f32c64f17610ce94d3bb953552bbe5ed1747a644
|
caf21b51fc665d31719d37d48e97ac81de12c890
|
refs/heads/master
| 2022-05-26T04:40:02.235788
| 2020-04-30T14:30:56
| 2020-04-30T14:30:56
| 257,082,292
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,510
|
r
|
WebScraping_Project.R
|
# Project dependencies.
# The original loaded corrplot, ggplot2, gplots, ggpubr, scales and zoo
# twice each; library() on an already-attached package is a no-op, so the
# duplicates are removed here without changing behaviour. The relative
# attach order of the remaining packages is preserved.
library(corrplot)
library(ggplot2)
library(gplots)
library(ggpubr)
library(scales)
library(zoo)
library(readxl)
library(quantmod)
library(tidyr)
library(stringr)
library(rvest)
library(plyr)
library(dplyr)
library(tidyverse)
#Download/Read the html: NCREIF--------------------------------------------------------------------------------
# Scrape the NCREIF property-index return table (HTML node "#farmland") for
# each region / property-type code and stack the results row-wise, adding a
# human-readable `region` label column.
# NOTE(review): growing all_return with rbind() inside the loop is O(n^2);
# collecting into a list and binding once afterwards would scale better.
region<- c("N","E","W","S","M","H","A","R","I","O")
region_name <- c("National","East","West","South","Middle","Hotel","Apartment","Retail","Industrial","Office")
all_return=data.frame()
for(i in 1:length(region)){
  url<-paste0("https://epitest.ncreif.org/property-index-returns.aspx?region=",region[i],"#farmland")
  html<- read_html(url)
  get_return<- html_nodes(html,"#farmland")
  return_table <- html_table(get_return)
  # NOTE(review): `return` shadows the base function name inside this loop
  return<-data.frame(return_table)
  return$region<-rep(region_name[i],nrow(return))
  all_return<-rbind(all_return, return)
  print(paste0("Finished Download Region: ", region[i]))
}
#' Convert percentage strings such as "3.25%" to numeric values (3.25).
#'
#' Vectorized over `x`. The original buried the result in an assignment
#' (returned invisibly); here the converted vector is the last expression.
#' Strings that are not numeric after removing "%" become NA (with the
#' usual as.numeric coercion warning).
#'
#' @param x Character vector of percentage strings.
#' @return Numeric vector the same length as `x`.
pct_to_number <- function(x) {
  as.numeric(sub("%", "", x))
}
# Convert the four quarterly return columns from "x%" strings to numerics.
all_return$Quarter.1<- pct_to_number(all_return$Quarter.1)
all_return$Quarter.2<- pct_to_number(all_return$Quarter.2)
all_return$Quarter.3<- pct_to_number(all_return$Quarter.3)
all_return$Quarter.4<- pct_to_number(all_return$Quarter.4)
# Reshape: long on quarter, then wide on region, then build "YYYY:Q" labels.
diff_return <- all_return %>% gather(Quarter, Return, Quarter.1:Quarter.4)
diff_return <-diff_return[order(diff_return$Year),]%>% spread(region, Return)
diff_return$Quarter <- str_extract(diff_return$Quarter, "\\d")
col_order <- c("Year", "Quarter", "National","East","West","South","Middle","Hotel","Apartment","Retail","Industrial","Office")
diff_return <- diff_return[, col_order] %>% unite("Year", Year:Quarter, sep = ":", remove = TRUE)
# Restrict to the 1989:1 - 2016:2 sample window.
ncreif_return<- diff_return[grep("1989:1", diff_return$Year):grep("2016:2", diff_return$Year),]
ncreif <-data.frame(ncreif_return[1:2])
View(ncreif_return)
# Summary statistics and pairwise correlations of the return series.
stats_ncreif<- ncreif_return[2:ncol(ncreif_return)]
summary(stats_ncreif)
cor <- round(cor(stats_ncreif),2)
cor
# Time-series plot of the four regional return series (columns 3-6).
area <- ncreif_return[,c(1,3:6)]
area_plot<- reshape2::melt(area, id.var='Year')
ncreif_area <- ggplot(area_plot, aes(x=Year, y=value, col=variable)) + geom_line(group=1,size=1)+
  labs(x = "Year", y = "%Return", title = "NCREIF Region Return")+
  theme(axis.text.x=element_text(angle=90, vjust = 0.5))
ncreif_area
# Time-series plot of the five property-type return series (columns 7-11).
type <- ncreif_return[,c(1,7:11)]
type_plot<- reshape2::melt(type, id.var='Year')
ncreif_type <- ggplot(type_plot, aes(x=Year, y=value, col=variable)) + geom_line(group=1,size=1)+
  labs(x = "Year", y = "%Return", title = "NCREIF Type Return")+
  theme(axis.text.x=element_text(angle=90, vjust = 0.5))
ncreif_type
#Using quantmod to get stock return------------------------------------------------------------------------------
#sp500
# Quarterly S&P 500 returns, 1989 through mid-2016, scaled to percent.
sp500<- data.frame(getSymbols("^GSPC",auto.assign = FALSE, from = "1989-01-01",to= "2016-06-30"))
SP500<-quarterlyReturn(sp500)*100
names(SP500)[1]<- c("S&P500")
chartSeries(sp500)
#Wilhire5000
# Quarterly Wilshire 5000 returns over the same window, in percent.
wilshire<- data.frame(getSymbols("^W5000",auto.assign = FALSE, from = "1989-01-01",to= "2016-06-30"))
WILSHIRE<-quarterlyReturn(wilshire)*100
names(WILSHIRE)[1]<- c("WILSHIRE")
chartSeries(wilshire)
stock_return<-cbind.data.frame(SP500,WILSHIRE)
#Nareit-------------------------------------------------------------------------------------------------------------
# NAREIT returns from a local spreadsheet (third column), scaled to percent.
Nareit <- read_excel("C:/Users/JY Development Group/Desktop/Elena/nareit_return.xlsx")[3]
NAREIT <- unlist(Nareit)*100
#ACLI----------------------------------------------------------------------------------------------------
ACLI <- read_excel("C:/Users/JY Development Group/Desktop/Elena/ACLI.xlsx",sheet="Final")
acli<-ACLI[,c(6)]
names(acli)[1]<- c("ACLI")
#All data--------------------------------------------------------------------------------------------------------
# Combine NCREIF, stock, NAREIT and ACLI series; rows labelled "YYYY:Q".
all<- cbind.data.frame(ncreif,stock_return,NAREIT,acli)
rownames(all) <- all$Year
all_return<- all[2:ncol(all)]
#----------------------------------------------------------------------------------------------------------------
summary(all_return)
cor_all<-round(cor(all_return),2)
cor_all
hist(all_return$National,col="red", breaks=20,main = "National Return")
# NOTE(review): install.packages() inside an analysis script re-installs on
# every run; installation should live outside the script.
install.packages("PerformanceAnalytics")
library("PerformanceAnalytics")
chart.Correlation(all_return, histogram=TRUE, pch=19)
|
2b7e031ca53efeaa53fbe18bbcdd17fc5e543df0
|
204b1b2ebdce859adbf34e4c31debc4fa5129d4e
|
/GA-master/GA/R/Cross_over & Mutation.R
|
196a60b01059cd0f7bce59517fc401456f2a380f
|
[] |
no_license
|
esther730/stat243
|
4445a16b14ad48dd754a1b6659c793efd1c57649
|
6d438d8f916a6e3f2f811daf65d033bfd206881a
|
refs/heads/master
| 2021-03-22T05:25:23.327769
| 2017-12-31T06:44:50
| 2017-12-31T06:44:50
| 101,693,590
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,160
|
r
|
Cross_over & Mutation.R
|
#' Crossover
#'
#' Produce one offspring from two parent chromosomes by choosing a single
#' random cut point and splicing the left segment of the first parent onto
#' the right segment of the second parent (one-point crossover).
#'
#' @param parents list of two "chromosome" objects used as parents
#' @param dat dataframe, data used to evaluate the offspring (dependent
#'   variable in the first column, independent variables in the rest)
#' @param fitfunc method, model selection criterion (default "AIC")
#' @param family family; for linear regression this should be a continuous
#'   probability density function (default is the gaussian family)
#' @export
crossover <- function(parents, dat, fitfunc="AIC", family="gaussian") {
  ## One-point crossover; returns an object of class "chromosome".
  first_genes <- parents[[1]]$chrom
  second_genes <- parents[[2]]$chrom
  n_loci <- length(first_genes)
  # The cut point is never the final locus, so both parents always
  # contribute at least one gene to the child.
  cut_at <- sample.int(n_loci - 1, size = 1)
  child_genes <- c(first_genes[1:cut_at], second_genes[(cut_at + 1):n_loci])
  # Evaluate the child against the data and wrap it as a "chromosome".
  initChrom(dat = dat, chrom = child_genes, fitfunc = fitfunc, family = family)
}
#' Mutation
#'
#' Flip a given number of randomly chosen loci on a single chromosome and
#' re-evaluate the result.
#'
#' @param chrom a "chromosome" object whose genes are to be mutated
#' @param nMutate integer, how many loci to flip on the chromosome
#' @param dat dataframe, data used to evaluate the mutated chromosome
#'   (dependent variable in the first column, independent variables in the
#'   rest)
#' @param fitfunc method, model selection criterion (default "AIC")
#' @param family family; for linear regression this should be a continuous
#'   probability density function (default is the gaussian family)
#' @export
mutateChrom <- function(chrom, nMutate, dat, fitfunc="AIC", family="gaussian") {
  ## Returns an object of class "chromosome" with `nMutate` loci toggled.
  n_loci <- length(chrom$chrom)
  flip_at <- sample.int(n_loci, size = nMutate)
  mutated <- chrom$chrom
  # abs(g - 1) maps 0 -> 1 and 1 -> 0, toggling the selected binary loci.
  mutated[flip_at] <- abs(mutated[flip_at] - 1)
  # Re-fit the mutated chromosome against the data.
  initChrom(dat = dat, chrom = mutated, fitfunc = fitfunc, family = family)
}
#' Mutation
#'
#' Perform mutations across an entire population. Chromosomes whose entry
#' in `nMutations` is zero are left untouched. The data used to re-evaluate
#' mutated chromosomes is taken from `pop$data`.
#'
#' (Fixed roxygen: previously documented a parameter `nMutate` and a
#' parameter `dat`, neither of which exists in the signature.)
#'
#' @param pop object of class "population"; carries its data in `pop$data`
#' @param nMutations integer vector with one entry per chromosome in `pop`,
#'   giving the number of mutations to perform on that chromosome
#' @param fitfunc method, model selection criterion used when re-fitting
#'   mutated chromosomes (default "AIC")
#' @param family family; for linear regression this should be a continuous
#'   probability density function (default is the gaussian family)
#' @return object of class "population" with mutated chromosomes
#' @export
mutatePop <- function(pop, nMutations, fitfunc="AIC", family="gaussian") {
  ## Performs mutations on a population; returns a "population" object.
  # Only visit chromosomes that actually receive at least one mutation.
  toMutate <- which(nMutations > 0)
  for (i in toMutate) {
    pop$genomes[[i]] <- mutateChrom(pop$genomes[[i]], nMutations[i], pop$data,
                                    fitfunc = fitfunc, family = family)
  }
  return(pop)
}
|
646c2418f149304f81e6fd8014b80459f04842bd
|
3c2cb26f7c89c54ce5328522e6b752ccf6838061
|
/man/post_material_table.Rd
|
fefa177ec9600ff2eba523c1b4c9b004c3fab4c8
|
[] |
no_license
|
c5sire/fbmaterials
|
39b8a9682da33ed915d93382cf66bf07ef6e96bd
|
0af427d046e572e2320bf52426bec3728a60d413
|
refs/heads/master
| 2020-12-25T21:12:47.424718
| 2017-04-26T18:37:04
| 2017-04-26T18:37:04
| 43,533,531
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 485
|
rd
|
post_material_table.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/api_material_list.R
\name{post_material_table}
\alias{post_material_table}
\title{post_material_table}
\usage{
post_material_table(table_materials, crop, year, mlist_name, notes = NULL)
}
\arguments{
\item{table_materials}{a data frame}
\item{crop}{character}
\item{year}{year integer}
\item{mlist_name}{character}
\item{notes}{character}
}
\description{
store program table
}
\author{
Reinhard Simon
}
|
594a786b4e0e853939b40ea86564705fed3a91b7
|
ea524efd69aaa01a698112d4eb3ee4bf0db35988
|
/tests/testthat/test-compare.R
|
1d9f237bbb06faf0ee57a1f6a0ac3ca38ae9e483
|
[
"MIT"
] |
permissive
|
r-lib/testthat
|
92f317432e9e8097a5e5c21455f67563c923765f
|
29018e067f87b07805e55178f387d2a04ff8311f
|
refs/heads/main
| 2023-08-31T02:50:55.045661
| 2023-08-08T12:17:23
| 2023-08-08T12:17:23
| 295,311
| 452
| 217
|
NOASSERTION
| 2023-08-29T10:51:30
| 2009-09-02T12:51:44
|
R
|
UTF-8
|
R
| false
| false
| 6,188
|
r
|
test-compare.R
|
# Tests for testthat's compare() helper and the low-level numeric
# comparison utility vector_equal_tol().
test_that("list comparison truncates to max_diffs", {
  x <- as.list(as.character(1:1e3))
  y <- lapply(x, paste0, ".")
  # Default message shows at most 10 lines; max_diffs widens it.
  lines1 <- strsplit(compare(x, y)$message, "\n")[[1]]
  expect_length(lines1, 10)
  lines2 <- strsplit(compare(x, y, max_diffs = 99)$message, "\n")[[1]]
  expect_length(lines2, 100)
})
test_that("no diff", {
  expect_equal(compare(1,1), no_difference())
})
test_that("vector_equal_tol handles infinity", {
  expect_true(vector_equal_tol(Inf, Inf))
  expect_true(vector_equal_tol(-Inf, -Inf))
  expect_false(vector_equal_tol(Inf, -Inf))
  expect_false(vector_equal_tol(Inf, 0))
})
test_that("vector_equal_tol handles na", {
  # NA compares equal only to NA, never to a concrete value.
  expect_true(vector_equal_tol(NA, NA))
  expect_false(vector_equal_tol(NA, 0))
})
# character ---------------------------------------------------------------
# compare() on character vectors: type, length, class and attribute checks
# must all pass before element-wise comparison happens.
test_that("types must be the same", {
  expect_match(compare("a", 1L)$message, "character is not integer")
})
test_that("base lengths must be identical", {
  expect_match(compare("a", letters)$message, "1 is not 26")
})
test_that("classes must be identical", {
  c1 <- "a"
  c2 <- structure("a", class = "mycharacter")
  expect_match(compare(c1, c2)$message, "'character' is not 'mycharacter'")
})
test_that("attributes must be identical", {
  # Names and arbitrary attributes must agree on both sides.
  x1 <- "a"
  x2 <- c(a = "a")
  x3 <- c(b = "a")
  x4 <- structure("a", a = 1)
  x5 <- structure("a", b = 1)
  expect_match(compare(x1, x2)$message, "names for current")
  expect_match(compare(x2, x3)$message, "Names: 1 string mismatch")
  expect_match(compare(x1, x4)$message, "target is NULL")
  expect_match(compare(x4, x5)$message, "Names: 1 string mismatch")
})
test_that("two identical vectors are the same", {
  expect_true(compare(letters, letters)$equal)
})
test_that("equal if both missing or both the same (multiple values)", {
  expect_true(compare(c("ABC", NA), c("ABC", NA))$equal)
  expect_false(compare(c(NA, NA), c("ABC", NA))$equal)
  expect_false(compare(c("AB", NA), c("ABC", NA))$equal)
  expect_false(compare(c("AB", "AB"), c("ABC", "AB"))$equal)
})
test_that("computes correct number of mismatches", {
  x <- mismatch_character(c("a", "b", "c"), c("c", "d", "e"))
  expect_equal(x$n, 3)
})
test_that("only differences are shown", {
  # Only the final element differs, so the report mentions just x[26].
  x <- mismatch_character(letters, c(letters[-26], "a"))
  lines <- strsplit(format(x), "\n")[[1]]
  expect_equal(lines[1], "1/26 mismatches")
  expect_equal(lines[2], 'x[26]: "z"')
})
test_that("not all lines are shown", {
  # A very long mismatching string is wrapped/truncated by the width limit.
  a <- "1234567890"
  b <- paste(rep(a, 10), collapse = "")
  x <- mismatch_character(a, b)
  lines <- strsplit(format(x, width = 16), "\n")[[1]]
  expect_equal(lines[1], "1/1 mismatches")
  expect_equal(length(lines), 8)
})
test_that("vectors longer than `max_diffs` (#513)", {
  comp <- compare(letters[1:2], LETTERS[1:2], max_diffs = 1)
  expect_s3_class(comp, "comparison")
  expect_false(comp$equal)
  expect_equal(comp$message, "2/2 mismatches\nx[1]: \"a\"\ny[1]: \"A\"")
})
# numeric ------------------------------------------------------------------
# compare() on numeric vectors: integer/double interchangeability, class
# and attribute checks, tolerance handling, and mismatch truncation.
test_that("numeric types are compatible", {
  expect_true(compare(1, 1L)$equal)
  expect_true(compare(1L, 1)$equal)
})
test_that("non-numeric types are not compatible", {
  expect_match(compare(1, "a")$message, "double is not character")
})
test_that("base lengths must be identical", {
  expect_match(compare(1, c(1, 2))$message, "1 is not 2")
})
test_that("classes must be identical", {
  f1 <- factor("a")
  f2 <- factor("a", ordered = TRUE)
  expect_match(compare(1L, f1)$message, "'integer' is not 'factor'")
  expect_match(compare(1L, f2)$message, "'integer' is not 'ordered'/'factor'")
})
test_that("attributes must be identical", {
  x1 <- 1L
  x2 <- c(a = 1L)
  x3 <- c(b = 1L)
  x4 <- structure(1L, a = 1)
  x5 <- structure(1L, b = 1)
  expect_match(compare(x1, x2)$message, "names for current")
  expect_match(compare(x2, x3)$message, "Names: 1 string mismatch")
  expect_match(compare(x1, x4)$message, "target is NULL")
  expect_match(compare(x4, x5)$message, "Names: 1 string mismatch")
})
test_that("unless check.attributes is FALSE", {
  # check.attributes = FALSE makes attribute differences irrelevant.
  x1 <- 1L
  x2 <- c(a = 1L)
  x3 <- structure(1L, a = 1)
  expect_equal(compare(x1, x2, check.attributes = FALSE)$message, "Equal")
  expect_equal(compare(x1, x3, check.attributes = FALSE)$message, "Equal")
  expect_equal(compare(x2, x3, check.attributes = FALSE)$message, "Equal")
})
test_that("two identical vectors are the same", {
  expect_true(compare(1:10, 1:10)$equal)
})
test_that("named arguments to all.equal passed through", {
  expect_equal(415, 416, tolerance = 0.01)
})
test_that("tolerance used for individual comparisons", {
  x1 <- 1:3
  x2 <- x1 + c(0, 0, 0.1)
  expect_false(compare(x1, x2)$equal)
  expect_true(compare(x1, x2, tolerance = 0.1)$equal)
})
test_that("mismatch_numeric truncates diffs", {
  # 11 elements; 10 differ (the middle element 6 matches itself).
  x <- mismatch_numeric(1:11, 11:1)
  expect_equal(x$n, 11)
  expect_equal(x$n_diff, 10)
  lines <- strsplit(format(x, max_diffs = 5), "\n")[[1]]
  expect_equal(length(lines), 5 + 2)
})
# time --------------------------------------------------------------------
# compare() on date-time objects: POSIXct/POSIXlt equivalence, tzone
# attribute checks, and all.equal tolerance.
test_that("both POSIXt classes are compatible", {
  x1 <- Sys.time()
  x2 <- as.POSIXlt(x1)
  expect_true(compare(x1, x2)$equal)
  expect_true(compare(x2, x1)$equal)
})
test_that("other classes are not", {
  expect_match(compare(Sys.time(), 1)$message, "'POSIXct'/'POSIXt' is not 'numeric'")
})
test_that("base lengths must be identical", {
  x1 <- Sys.time()
  x2 <- c(x1, x1 - 3600)
  expect_match(compare(x1, x2)$message, "1 is not 2")
})
test_that("tzones must be identical", {
  # Same instant, different tzone attribute -> attribute mismatch.
  t1 <- ISOdatetime(2016, 2, 29, 12, 13, 14, "EST")
  t2 <- ISOdatetime(2016, 2, 29, 12, 13, 14, "US/Eastern")
  expect_match(compare(t1, t2)$message, '"tzone": 1 string mismatch')
})
test_that("two identical vectors are the same", {
  x <- Sys.time()
  expect_true(compare(x, x)$equal)
})
test_that("two different values are not the same", {
  x1 <- Sys.time()
  x2 <- x1 + 3600
  expect_false(compare(x1, x2)$equal)
})
test_that("uses all.equal tolerance", {
  # Sub-millisecond difference falls within the default tolerance.
  x1 <- structure(1457284588.83749, class = c("POSIXct", "POSIXt"))
  x2 <- structure(1457284588.837, class = c("POSIXct", "POSIXt"))
  expect_true(compare(x1, x2)$equal)
})
|
013213513c9adbe0bac12f35c859ad49d4de8d4a
|
acabe441d5bd5391ff0812169275c67128978c39
|
/tests/testthat/test_template_table_attributes.R
|
579712a5388cb324088d576acc9fa2c0ded43bcf
|
[
"MIT"
] |
permissive
|
Ashley-LW/EMLassemblyline
|
65d448ce6ee760f06904326ca2f3b9f4e475a85e
|
a37bc32c1feffa4f8a5ae88f158457fd05d4a86e
|
refs/heads/master
| 2022-12-10T17:30:00.850619
| 2020-09-08T23:02:38
| 2020-09-08T23:02:38
| 292,932,246
| 0
| 0
|
MIT
| 2020-09-04T19:38:35
| 2020-09-04T19:38:34
| null |
UTF-8
|
R
| false
| false
| 6,853
|
r
|
test_template_table_attributes.R
|
context('Create table attributes template')
library(EMLassemblyline)
# File inputs = two data tables -----------------------------------------------
# Exercises template_table_attributes() when the data tables are read from
# files on disk: argument validation, fresh imports, re-imports of existing
# templates, and write.file = TRUE output.
testthat::test_that('Test usage with file inputs', {
  # Missing path results in error
  expect_error(
    suppressMessages(
      template_table_attributes(
        data.path = system.file(
          '/examples/data',
          package = 'EMLassemblyline'
        ),
        data.table = c(
          'decomp.csv',
          'nitrogen.csv'
        ),
        write.file = FALSE
      )
    )
  )
  # Invalid data path results in error
  expect_error(
    suppressMessages(
      template_table_attributes(
        path = system.file(
          '/examples',
          package = 'EMLassemblyline'
        ),
        data.table = c(
          'decomp.csv',
          'nitrogen.csv'
        ),
        write.file = FALSE
      )
    )
  )
  # Invalid data tables result in error
  expect_error(
    suppressMessages(
      template_table_attributes(
        path = system.file(
          '/examples',
          package = 'EMLassemblyline'
        ),
        data.path = system.file(
          '/examples/data',
          package = 'EMLassemblyline'
        ),
        data.table = c(
          'decompppp.csv',
          'nitrogennnnn.csv'
        ),
        write.file = FALSE
      )
    )
  )
  # New imports result in messages
  expect_message(
    template_table_attributes(
      path = system.file(
        '/examples',
        package = 'EMLassemblyline'
      ),
      data.path = system.file(
        '/examples/data',
        package = 'EMLassemblyline'
      ),
      data.table = c(
        'decomp.csv',
        'nitrogen.csv'
      ),
      write.file = FALSE
    )
  )
  # Attempt to import templates when they already exist results in messages
  expect_message(
    template_table_attributes(
      path = system.file(
        '/examples/templates',
        package = 'EMLassemblyline'
      ),
      data.path = system.file(
        '/examples/data',
        package = 'EMLassemblyline'
      ),
      data.table = c(
        'decomp.csv',
        'nitrogen.csv'
      ),
      write.file = FALSE
    )
  )
  # write.file = TRUE writes files to path
  # Remove any leftovers from previous runs so the new writes are observable.
  file.remove(
    paste0(
      tempdir(),
      '/attributes_decomp.txt'
    )
  )
  file.remove(
    paste0(
      tempdir(),
      '/attributes_nitrogen.txt'
    )
  )
  file.remove(
    paste0(
      tempdir(),
      '/custom_units.txt'
    )
  )
  expect_message(
    template_table_attributes(
      path = tempdir(),
      data.path = system.file(
        '/examples/data',
        package = 'EMLassemblyline'
      ),
      data.table = c(
        'decomp.csv',
        'nitrogen.csv'
      ),
      write.file = TRUE
    )
  )
  expect_true(
    'custom_units.txt' %in% list.files(tempdir())
  )
})
# x inputs = data tables ------------------------------------------------------
# Exercises template_table_attributes() when the data tables are supplied in
# memory via the `x` argument (output of template_arguments()) rather than
# read from files.
testthat::test_that('x inputs = data tables', {
  # Make function call
  x <- template_arguments(
    data.path = system.file(
      '/examples/data',
      package = 'EMLassemblyline'
    ),
    data.table = c(
      'decomp.csv',
      'nitrogen.csv'
    )
  )
  x <- x$x
  # Missing path results in messages
  expect_message(
    template_table_attributes(
      x = x,
      write.file = FALSE
    )
  )
  # Missing path results in messages
  expect_message(
    template_table_attributes(
      data.path = system.file(
        '/examples/data',
        package = 'EMLassemblyline'
      ),
      x = x,
      write.file = FALSE
    )
  )
  # Missing data path results in messages
  expect_message(
    template_table_attributes(
      path = system.file(
        '/examples',
        package = 'EMLassemblyline'
      ),
      x = x,
      write.file = FALSE
    )
  )
  # Valid data path and data tables results in messages
  # expect_message(
  #   template_table_attributes(
  #     data.path = system.file(
  #       '/examples/data',
  #       package = 'EMLassemblyline'
  #     ),
  #     data.table = c(
  #       'decomp.csv',
  #       'nitrogen.csv'
  #     ),
  #     x = x,
  #     write.file = FALSE
  #   )
  # )
  # Valid data path and data tables result in addition of attributes templates
  # with expected names, class, column names, and nrows > 1. Custom units
  # template is also added.
  output <- suppressMessages(
    template_table_attributes(
      data.path = system.file(
        '/examples/data',
        package = 'EMLassemblyline'
      ),
      data.table = c(
        'decomp.csv',
        'nitrogen.csv'
      ),
      x = x,
      write.file = FALSE
    )
  )
  # Expected template names take the form attributes_<table>.txt.
  attr_names <- paste0(
    'attributes_',
    stringr::str_remove(
      string = names(output$data.table),
      pattern = '.csv'
    ),
    '.txt'
  )
  for (i in 1:length(attr_names)){
    expect_equal(
      attr_names[i] %in% names(output$template),
      TRUE
    )
    expect_equal(
      class(output$template[[attr_names[i]]]$content),
      'data.frame'
    )
    expect_equal(
      all(
        colnames(output$template[[attr_names[i]]]$content) %in%
          c('attributeName', 'attributeDefinition', 'class', 'unit',
            'dateTimeFormatString', 'missingValueCode',
            'missingValueCodeExplanation')
      ),
      TRUE
    )
    expect_equal(
      nrow(output$template[[attr_names[i]]]$content) > 1,
      TRUE
    )
  }
  expect_true(
    'custom_units.txt' %in% names(output$template)
  )
  # Attempt to import templates when they already exist results in messages
  expect_message(
    template_table_attributes(
      data.path = system.file(
        '/examples/data',
        package = 'EMLassemblyline'
      ),
      data.table = c(
        'decomp.csv',
        'nitrogen.csv'
      ),
      x = output,
      write.file = FALSE
    )
  )
  # Invalid column names result in error
  # Corrupt one column name (dots instead of underscores) and expect failure.
  input <- x
  colnames(input$data.table$nitrogen.csv$content)[
    colnames(input$data.table$nitrogen.csv$content) == 'stem_mass_density'
  ] <- 'stem.mass.density'
  expect_error(
    suppressMessages(
      template_table_attributes(
        path = tempdir(),
        data.path = system.file(
          '/examples/data',
          package = 'EMLassemblyline'
        ),
        data.table = c(
          'decomp.csv',
          'nitrogen.csv'
        ),
        x = input,
        write.file = FALSE
      )
    )
  )
  # write.file = TRUE writes files to path
  expect_message(
    template_table_attributes(
      path = tempdir(),
      data.path = system.file(
        '/examples/data',
        package = 'EMLassemblyline'
      ),
      data.table = c(
        'decomp.csv',
        'nitrogen.csv'
      ),
      x = x,
      write.file = TRUE
    )
  )
})
|
c3b06b5b69210e201ab370a54996aa0c9e76e753
|
74797c25ebc6f06fa01b608cb1ed6ce2574d3083
|
/ggplot/gg-histogram2.R
|
ca83cc5c998433352429cfab4cb08ae9cef3f6fc
|
[] |
no_license
|
anhnguyendepocen/Rgraphs
|
c533196de8e5d34a925bea11bd7c6b57d85d52b9
|
e306d8ccf06a99193b494aa2d55b11c48d36a28a
|
refs/heads/master
| 2022-02-15T17:07:38.394802
| 2019-07-05T14:51:04
| 2019-07-05T14:51:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,917
|
r
|
gg-histogram2.R
|
#histogram with labels
# Build a simulated student-results data set (n = 10000) used by all the
# ggplot2 histogram/density demos that follow.
library(dplyr)
library(ggplot2)
library(reshape2)
#------------
# Candidate factor levels for campus, institute and programme.
(campus1 = paste('C', 1:6, sep='-'))
(institute1 = paste('Institute', 1:10, sep ='-'))
(program1 = paste('P',100:150, sep='-'))
runif(100)
# NOTE(review): the runif(100) above is unused -- likely a leftover demo line.
n=10000
(rollno = 10000 + 0:(10000-1))
#-------------
# Draw categorical attributes (campus is weighted) and normal GPA scores.
campus = sample(campus1, size=n, replace=T, prob=c(3/10,2/10,2/10,2/10,1/10,1/10))
institute = sample(institute1, size=n, replace=T)
program = sample(program1, size=n, replace=T)
(sgpa = round(rnorm(mean=7, sd=.5, n=n),2))
(cgpa = round(rnorm(mean=6.5, sd=.6, n=n),2))
range(cgpa) ; range(sgpa)
result1 = data.frame(campus, institute, program, sgpa, cgpa)
# Convert the categorical columns to factors for plotting.
catcols1 =c('campus','institute','program')
result1[,catcols1] = lapply(result1[,catcols1], as.factor)
str(result1)
#------------
# Base-graphics histograms for comparison with the ggplot versions below.
hist(result1$sgpa)
hist(result1$sgpa, breaks=c(0,4,5,6,7,8,9,10))
# ---- ggplot2 histograms with count labels, plus area/density plots -------
# Basic histogram of SGPA.
ggplot(result1, aes(x = sgpa)) + geom_histogram(bins = 15)
# Histogram with the bin counts printed above each bar.
ggplot(result1, aes(x = sgpa)) +
  stat_bin(bins = 15) +
  stat_bin(bins = 15, geom = "text", aes(label = ..count..), vjust = -1.5) +
  ylim(c(0, 3000))
# -------- area / density plots --------
g1 <- ggplot(result1, aes(x = sgpa))
g1 + geom_area(stat = "bin", color = "black", fill = "#00AFBB")
g1 + geom_area(aes(y = ..density..), stat = "bin")
g1 + geom_density()
g1 + geom_density(aes(fill = campus), alpha = 0.4)
# Per-campus mean SGPA, used below for dashed reference lines.
mu <- result1 %>% group_by(campus) %>% summarise(grp.mean = mean(sgpa, na.rm = TRUE))
mu
g1 + geom_density(aes(fill = campus), alpha = 0.4) +
  geom_vline(data = mu, aes(xintercept = grp.mean, color = campus), linetype = "dashed")
# Manually set line colors; palette reference:
# https://rstudio-pubs-static.s3.amazonaws.com/228019_f0c39e05758a4a51b435b19dbd321c23.html
g1a <- g1 + geom_density(aes(color = campus)) +
  geom_vline(data = mu, aes(xintercept = grp.mean, color = campus), linetype = "dashed") +
  theme_minimal()
g1a + scale_color_manual(values = 1:6)
# -------- density with min / mean / max reference lines --------
g1 + geom_density(color = "black", fill = "gray") +
  geom_vline(xintercept = c(min(sgpa), mean(sgpa), max(sgpa)),
             color = 1:3, linetype = 1:3, size = 1)
# -----
g2 <- ggplot(result1, aes(x = sgpa, fill = campus))
g2 + geom_bar(stat = "bin")
# (Fix: removed a stray line consisting only of `g` -- no object named `g`
# exists in this script, so evaluating it raised "object 'g' not found".)
# More histogram variants, ECDF and Q-Q plots on the same SGPA data.
g3 = ggplot(result1, aes(x = sgpa))
g3 + geom_histogram(aes(y = ..density..), bins = 50)
# Change color by campus
g3 + geom_histogram(aes(color = campus), fill = "white", bins = 50) + theme_minimal()
# Position adjustment "identity"(overlaid)
g3 + geom_histogram(aes(color = campus), fill = "white", bins = 50, alpha = 0.6, position = "identity")
# Position adjustment "dodge" (Interleaved)
# Add mean lines and color by campus
g1 + geom_histogram(aes(color = campus), fill = "white", alpha = 0.6, position = "dodge", bins = 50) + geom_vline(aes(xintercept = mean(sgpa)), linetype = "dashed")
# Change fill, color manually
# Change outline color manually
g1 + geom_histogram(aes(color = campus), fill = "white", alpha = 0.4, position = "identity", bins = 50) + scale_color_manual(values = c("#00AFBB","#E7B800", 1:4))
# Histogram with density plot
g1 + geom_histogram(aes(y = ..density..),color = "black", fill = "white") + geom_density(alpha = 0.2, fill = "#FF6666") + theme_minimal()
# Color by groups
g1 + geom_histogram(aes(y = ..density.., color = campus, fill = campus), alpha = 0.4, position = "identity") + geom_density(aes(color = campus), size =1)
g3 + geom_dotplot(aes(fill = campus), bins=15)
#----
# Empirical CDF of SGPA, as points and as a step function.
g3 + stat_ecdf(geom = "point")
g3 + stat_ecdf(geom = "step")
# Basic plot
#---
# Normal Q-Q plots, optionally colored/shaped by group.
g4 = ggplot(result1, aes(sample = sgpa))
g4 + stat_qq()
# Change point shapes by groups
# Use custom color palettes
g4 + stat_qq(aes(color = campus))
g4 + stat_qq(aes(shape = campus, color = institute))
#----color histogram
# Histograms where each bin gets its own fill via cut().
ggplot(result1, aes(x = sgpa, fill = cut(sgpa, 10))) + geom_histogram()
ggplot(result1, aes(sgpa, fill = cut(sgpa, 10))) + geom_histogram(show.legend = FALSE)
ggplot(result1, aes(sgpa, fill = cut(sgpa, 10))) +
  geom_histogram(show.legend = FALSE, bins = 15) +
  scale_fill_discrete(h = c(200, 10))
# Adapted from https://drsimonj.svbtle.com/pretty-histograms-with-ggplot2.
# (Fix: the original two calls referenced the blog's undefined objects `d`
# and `x`, and the second call misspelled ggplot as `gplot`; both now use
# this script's result1/sgpa.)
ggplot(result1, aes(sgpa, fill = cut(sgpa, 100))) +
  geom_histogram(show.legend = FALSE) +
  scale_fill_discrete(h = c(240, 10), c = 120, l = 70)
ggplot(result1, aes(sgpa, fill = cut(sgpa, 100))) +
  geom_histogram(show.legend = FALSE) +
  scale_fill_discrete(h = c(240, 10), c = 120, l = 70) +
  theme_minimal() +
  labs(x = "Variable X", y = "n") +
  ggtitle("Histogram of X")
#facet
# Faceted labeled histograms: by campus, then by institute.
# https://www.sharpsightlabs.com/blog/ggplot-histogram/
g5 <- ggplot(result1, aes(x = sgpa, fill = campus)) +
  stat_bin(bins = 7) +
  stat_bin(bins = 7, geom = "text", aes(label = ..count..), size = 2, vjust = -1.5) +
  ylim(c(0, 1500))
g5 + facet_grid(campus ~ .) + scale_color_manual(values = 1:7)
g5b <- ggplot(result1, aes(x = sgpa, fill = institute)) +
  stat_bin(bins = 7) +
  stat_bin(bins = 7, geom = "text", aes(label = ..count..), size = 2, vjust = -1.5) +
  ylim(c(0, 1500))
g5b + facet_wrap(institute ~ .) + scale_color_manual(values = 1:7)
|
658c06e9b636dc71626a6ac84f7367904a21c662
|
b94cac0d913688de392970bbf985651e2f6fd447
|
/man/read_growth.Rd
|
0fbb0bf0b3101f43ffd40c82935ee089373e7e8b
|
[] |
no_license
|
kamapu/treegrowth
|
79406372154858477d3df22530f78025f5375f7f
|
59420ac1c39efef80765d4303f9345cbd1b51daa
|
refs/heads/master
| 2021-03-20T21:30:40.814252
| 2020-03-23T10:12:22
| 2020-03-23T10:12:22
| 247,236,112
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 933
|
rd
|
read_growth.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_growth.R
\name{read_growth}
\alias{read_growth}
\title{Read growth data and produce an ODB file}
\usage{
read_growth(xlsx, odb = ".temp.odb", format_date = "\%d.\%m.\%Y", ...)
}
\arguments{
\item{xlsx}{Character value with the path and/or name of the XLSX input file.}
\item{odb}{Character value indicating the path and/or name of output ODB
file.}
\item{format_date}{Character value indicating format of date entries (see
\code{\link[=xlsx2list]{xlsx2list()}}).}
\item{...}{Further arguments (not yet in use).}
}
\value{
A data frame and an ODB file as in \code{\link[=odb2df]{odb2df()}}.
}
\description{
Read row data and export to a single data frame using an ODB file as
intermediating file.
This function applies the sequence \code{\link[=xlsx2list]{xlsx2list()}} -> \code{\link[=list2odb]{list2odb()}} ->
\code{\link[=odb2df]{odb2df()}}.
}
|
597bb75a7e590937e2187b9da877a9eace66d209
|
8131d16c335c651d96c2e40b6a4a026a1e79109c
|
/R/Rao.R
|
fcb0ae97a0b03c12d545c9def08ceb8de4526cea
|
[] |
no_license
|
mattmar/rasterdiv
|
dc7812f15a4ebb98b1b35def22f14f5092a86864
|
5dd4383d7b7360073c8612754d94285a4bd340d4
|
refs/heads/master
| 2023-05-11T19:15:35.369030
| 2023-05-10T09:38:22
| 2023-05-10T09:38:22
| 252,508,233
| 12
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 447
|
r
|
Rao.R
|
# Deprecated wrapper: Rao's Q is the alpha = 1 special case of the
# parametric Rao index, so this function simply signals deprecation and
# delegates to paRao(). The `shannon` argument is accepted but has no
# effect, exactly as in the original implementation.
Rao <- function(x, dist_m = "euclidean", window = 9, rasterOut = TRUE,
                mode = "classic", lambda = 0, shannon = FALSE,
                rescale = FALSE, na.tolerance = 1.0, simplify = 2, np = 1,
                cluster.type = "SOCK", debugging = FALSE) {
  # Warn callers that paRao(..., alpha=1) is the supported entry point.
  .Deprecated(new = "paRao(..., alpha=1)")
  # Forward everything to the generalised implementation; `mode` maps onto
  # paRao's `method` argument and alpha is pinned to 1.
  paRao(
    x = x,
    dist_m = dist_m,
    window = window,
    method = mode,
    alpha = 1,
    lambda = lambda,
    na.tolerance = na.tolerance,
    rescale = rescale,
    diag = TRUE,
    simplify = simplify,
    np = np,
    cluster.type = cluster.type,
    debugging = debugging
  )
}
|
5557ba4ab0a2203cc01a020eff6d4f68d3831b3d
|
513517886e62467fe7ce3df576a380a05fb209e4
|
/packages.R
|
e55cfc61de4ab6950a2362b830bbf4ef614894e5
|
[] |
no_license
|
tomvdbussche/ozt-solutions
|
e490140d1c3db95e7442ba42f7b81d56d752c27d
|
6f52fbec504d665d6368b0a7adaea15164b4081d
|
refs/heads/master
| 2020-12-25T18:52:38.142014
| 2017-08-22T11:29:49
| 2017-08-22T11:29:49
| 94,005,208
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 257
|
r
|
packages.R
|
# Install the packages this project depends on, skipping any that are
# already present so repeated runs do nothing.
pkgs <- c(
  "car",
  "gmodels",
  "lsr",
  "RcmdrMisc",
  "sp",
  "raster"
)
# Compare against what the library already contains.
already_installed <- rownames(installed.packages())
to_install <- pkgs[!(pkgs %in% already_installed)]
if (length(to_install) > 0) {
  install.packages(to_install)
}
|
eb18b36f1e30b86165bdd2873ab7eda30b5bf654
|
145bb5d044669b136e37f3c5d966696e82dc457e
|
/testEachStratumTime.r
|
de5da190d3966e86b0e7325e5166e0680450a75c
|
[] |
no_license
|
epinor/Discrete_curve_group_code
|
89958ec05e02d361672fe705dc9525b8e072b34b
|
fc90b44e5cba252a57c3176e2f216759d85beab2
|
refs/heads/master
| 2021-06-14T16:50:32.082938
| 2017-03-20T10:03:22
| 2017-03-20T10:03:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,319
|
r
|
testEachStratumTime.r
|
## ============================================================
## Aim: Check if there are more genes in a curve group than expected
## ============================================================
## Permutation-style analysis: for each stratum, compare the observed number
## of significant genes (globally and per curve group) against simulations.
## Relies on helpers defined elsewhere -- computeStatisticsOneStratum(),
## computeCurveCodeAndPval(), simulateData(), defineCurveClasses() -- and on
## pre-loaded objects dataPeriod*Spr / dataPeriod*NotSpr and constantList.
strataNames <- c("WithSpread", "WithoutSpread")
strataNames2 <- c("With spread", "Without spread")
realData <- list()
realData[[1]] <- list(dataPeriod1Spr,
                      dataPeriod2Spr,
                      dataPeriod3Spr)
realData[[2]] <- list(dataPeriod1NotSpr,
                      dataPeriod2NotSpr,
                      dataPeriod3NotSpr)
names(realData) <- strataNames
## Test hypothesis for each stratum
# Significance cut-offs: the reporting cut-off plus a grid used for plots.
limitPval <- constantList$limitPval
limitPvalVec <- c((1:9)*0.0001, (1:9)*0.001, (1:9)*0.01, (1:9)*0.1, 1.0)
nofSim <- constantList$nofSim
nofPeriods <- constantList$nofPeriods
nofStrata <- length(realData)
nofGenes <- nrow((realData[[1]])[[1]])
nofGroups <- factorial(nofPeriods)
curveNames <- defineCurveClasses(nofPeriods)
# resMat holds p-values, resMat2 observed (expected) counts; one row per
# curve group plus a global row, one column per stratum.
resMat <- resMat2 <- matrix(NA, nrow=length(curveNames)+1, ncol=nofStrata)
for (i in 1:nofStrata) {
  print(paste(Sys.time(), "-- Real and sim. data -- stratum", strataNames[i]))
  rData <- realData[[i]]
  # Observed per-gene test statistics, p-values and curve-group codes.
  statOneStratum <- computeStatisticsOneStratum(rData)
  curveCodeAndPval <- computeCurveCodeAndPval(statOneStratum)
  tstatPval <- curveCodeAndPval$pVal
  curveCode <- curveCodeAndPval$curveCode
  # Null distribution: repeat the same pipeline on simulated data sets.
  tstatPvalSim <- curveCodeSim <- matrix(NA, ncol=nofGenes, nrow=nofSim)
  for (sim in 1:nofSim) {
    if (sim%%10==1)
      print(paste(" ", Sys.time(), "-- Simulation", sim, "of", nofSim, "...."))
    sData <- simulateData(rData)
    statOneStratum <- computeStatisticsOneStratum(sData)
    curveCodeAndPval <- computeCurveCodeAndPval(statOneStratum)
    tstatPvalSim[sim,] <- curveCodeAndPval$pVal
    curveCodeSim[sim,] <- curveCodeAndPval$curveCode
  }
  ## Compute p-values and number of genes for resMat and resMat2
  ## Global
  pGlobal <- rep(NA, length(limitPvalVec))
  selInd <- which(limitPvalVec==limitPval)
  Tstat <- sum(tstatPval<limitPval)
  TstatExp <- mean(rowSums(tstatPvalSim<limitPval))
  for (k in 1:length(limitPvalVec)) {
    TstatCur <- sum(tstatPval<limitPvalVec[k])
    TstatSim <- rowSums(tstatPvalSim<limitPvalVec[k])
    # Permutation p-value with the usual +1 correction.
    pGlobal[k] <- signif((sum(TstatCur<=TstatSim)+1)/(nofSim+1),2)
  }
  ## Each curve group
  pVal <- matrix(NA, nrow=length(limitPvalVec), ncol=nofGroups)
  Gvec <- GvecExp <- rep(NA, nofGroups)
  for (j in 1:nofGroups) {
    # Observed and expected counts of significant genes in group j.
    Gvec[j] <- sum(tstatPval<limitPval & curveCode==curveNames[j])
    GSim <- rep(NA, nofSim)
    for (sim in 1:nofSim)
      GSim[sim] <- sum(tstatPvalSim[sim,]<limitPval & curveCodeSim[sim,]==curveNames[j])
    GvecExp[j] <- mean(GSim)
    for (k in 1:length(limitPvalVec)) {
      G <- sum(tstatPval<limitPvalVec[k] & curveCode==curveNames[j])
      GSim <- rep(NA, nofSim)
      for (sim in 1:nofSim)
        GSim[sim] <- sum(tstatPvalSim[sim,]<limitPvalVec[k] & curveCodeSim[sim,]==curveNames[j])
      pVal[k,j] <- signif((sum(G<=GSim)+1)/(nofSim+1),2)
    }
  }
  resMat[,i] <- paste(c(pGlobal[selInd], pVal[selInd,]), sep="")
  resMat2[,i] <- paste(c(Tstat, Gvec), " (", round(c(TstatExp, GvecExp)), ")", sep="")
  ## Make plots of p-values
  # One PDF per stratum: p-value vs alpha, global curve solid, groups dashed.
  pdf(paste("Res/EachStratum/plotStratum", strataNames[i], ".pdf", sep=""))
  plot(limitPvalVec, pGlobal, type="l", log="xy", ylim=c(0.0001,100),
       xlab="alpha",
       ylab="p-value",
       col=1, main= strataNames2[i], cex.main=2.0, cex.axis=1.6, cex.lab=1.6)
  for (j in 1:nofGroups)
    lines(limitPvalVec, pVal[,j], col=j, lty=2)
  legend(c(0.002,0.1),c(1.05,100),
         legend=c("Global", paste("CurveGroup", curveNames)), col=c(1, 1:nofGroups), lty=c(1,rep(2,nofGroups)))
  abline(h=0.05, lty=3)
  dev.off()
}
# Attach row/column labels and write both result tables to disk.
resMat <- cbind(c("Global", curveNames), resMat)
resMat <- rbind(c("CurveGroup", strataNames), resMat)
write.table(resMat, "Res/EachStratum/resTableTestEachStratumTime.txt",
            col.names=F, row.names=F, quote=F, sep="\t")
resMat2 <- cbind(c("Global", curveNames), resMat2)
resMat2 <- rbind(c("CurveGroup", strataNames), resMat2)
write.table(resMat2, "Res/EachStratum/resTable2TestEachStratumTime.txt",
            col.names=F, row.names=F, quote=F, sep="\t")
|
1d32d3120b534016973a70447051183ea1840713
|
277dbb992966a549176e2b7f526715574b421440
|
/R_training/실습제출/전나영/191023/lab_05.R
|
9fa2696181b1c1ac7f49ba60d017732fee4f8921
|
[] |
no_license
|
BaeYS-marketing/R
|
58bc7f448d7486510218035a3e09d1dd562bca4b
|
03b500cb428eded36d7c65bd8b2ee3437a7f5ef1
|
refs/heads/master
| 2020-12-11T04:30:28.034460
| 2020-01-17T08:47:38
| 2020-01-17T08:47:38
| 227,819,378
| 0
| 0
| null | 2019-12-13T12:06:33
| 2019-12-13T10:56:18
|
C++
|
UTF-8
|
R
| false
| false
| 946
|
r
|
lab_05.R
|
# Lab 05: conditional statements (if/else, switch) practice exercises.
# Problem 1: classify a random grade (1-6) as lower-years (<= 3) or
# upper-years; the cat() messages are intentionally in Korean.
grade <- sample(1:6, 1)
if(grade <= 3){
  cat(grade, "학년은 저학년입니다.\n")
}else{
  cat(grade, "학년은 고학년입니다.\n")
}
# Problem 2: pick a random operation (1-5) and apply it to 300 and 50
# (add, subtract, multiply, divide, modulo).
choice <- sample(1:5, 1)
if(choice==1){
  cat("결과값 :", 300+50)
}else if(choice==2){
  cat("결과값 :", 300-50)
}else if(choice==3){
  cat("결과값 :", 300*50)
}else if(choice==4){
  cat("결과값 :", 300/50)
}else{
  cat("결과값 :", 300%%50)
}
# Problem 3: repeat a randomly chosen decoration character a random
# number of times (3-10).
count <- sample(3:10, 1)
deco <- sample(1:3, 1)
if(deco==1){
  rep("*", count)
}else if(deco==2){
  rep("$", count)
}else{
  rep("#", count)
}
# Problem 4: map a random score (0-100) to a letter grade by switching on
# the tens digit; 90-100 -> A, 80s -> B, ..., below 60 -> F.
score <- sample(0:100, 1)
score2 <- score%/%10
score2 <- as.character(score2)
level <- switch(EXPR = score2,
                "10"=, "9"="A 등급",
                "8"="B 등급",
                "7"="C 등급",
                "6"="D 등급",
                "F")
cat(score, "점은 ", level, "입니다\n", sep="")
# Problem 5: pair uppercase and lowercase letters ("Aa", "Bb", ..., "Zz").
big <- LETTERS[1:26]
small <- letters[1:26]
alpha <- paste(big, small, sep = "")
|
ef150663fc0e36a12a826237d26b695e4b369f0b
|
631f41c200b59e6babd378a959ddbcb95bda6df0
|
/Gaussian_Random_Walk.R
|
784a08a875b56ce592e156315d7663de02014b59
|
[] |
no_license
|
aqknutsen/Stocks_ARIMA
|
59141d6f6c50c83b6fef997f6c79e579e295645a
|
87a376ce4c06a808eaf84a685e2840af52b2d655
|
refs/heads/master
| 2020-04-15T19:10:29.451161
| 2017-10-12T18:30:38
| 2017-10-12T18:30:38
| 42,086,755
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 410
|
r
|
Gaussian_Random_Walk.R
|
# Simulate a 10000-step standard-normal random walk and plot it.
#
# Fix: the original grew `x` and `my_time` one element at a time inside a
# while-loop, copying both vectors on every append (O(n^2) overall). This
# version draws the same uniform stream up front and builds the walk with
# cumsum(), so under the same RNG seed it reproduces the loop's output
# exactly (the loop's runif(1) calls consume the stream in the same order
# as a single runif(n_steps - 1)).
n_steps <- 10000
# Time axis: 0, 1, ..., n_steps - 1 (step i happens at time i - 1).
my_time <- 0:(n_steps - 1)
# Start at the origin; each later point adds an inverse-CDF-transformed
# uniform draw, i.e. a standard normal increment.
increments <- qnorm(runif(n_steps - 1, 0, 1), 0, 1)
x <- c(0, cumsum(increments))
plot(my_time, x)
|
698a5590d7a349d3db0ecde4066008fa4ec77b1b
|
e08f48bc3526fa30bc690f9c6f0ce1a0fffffdcd
|
/man/t.test.p.value.Rd
|
6aaeb8dfa55b39d6a6342a5ac33a7592841bff48
|
[] |
no_license
|
cran/demoGraphic
|
1aada0d1ff141ec89759be7ef0d9ca4e1d38edfd
|
c4e9e0372775ff30241dc18393b988556b9f25c5
|
refs/heads/master
| 2020-04-15T18:17:09.966830
| 2019-01-09T16:30:07
| 2019-01-09T16:30:07
| 164,908,447
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 312
|
rd
|
t.test.p.value.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/demo_Graphic.R
\name{t.test.p.value}
\alias{t.test.p.value}
\title{t.test to calculate p value}
\usage{
\method{t}{test.p.value}(...)
}
\arguments{
\item{...}{variables}
}
\value{
p value
}
\description{
Performs a t-test on the supplied variables and returns only the p-value.
}
|
459740511c45118c17bbb7379d232fe555ab1ea0
|
101c7ee80526ab15b90e7e1e09726a4b6052c02e
|
/Week04/Assignment_4.3.r
|
8c816ba708c3814b0dbf3b250249d0f41e63a54a
|
[
"MIT"
] |
permissive
|
abo1/mit15.071x
|
d18d5af21b002c2acc8f9a052ab2b4c2ae7e2676
|
30dfc1174b187636314249ecfb4954ac95bbbd91
|
refs/heads/master
| 2021-01-15T11:14:41.800869
| 2014-05-03T11:27:58
| 2014-05-03T11:27:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,812
|
r
|
Assignment_4.3.r
|
# MITx 15.071x, Week 4, assignment 4.3: predicting life expectancy in the
# built-in `state` dataset with linear regression and CART trees.
# This file reads like a saved console transcript: models are refit and
# variables reassigned as the assignment questions progress.
# NOTE(review): rpart() and prp() come from the rpart and rpart.plot
# packages, neither of which is loaded here -- presumably attached earlier
# in the original session; confirm before running top-to-bottom.
data(state)
statedata = data.frame(state.x77)
str(statedata)

# Linear model with all predictors; in-sample sum of squared errors (SSE).
linState = lm(Life.Exp ~ ., data = statedata)
summary(linState)
linStateResp = predict(linState)
sse = sum((linStateResp - statedata$Life.Exp)^2)
sse

# Reduced linear model with four predictors, and its SSE.
linState2 = lm(Life.Exp ~ Population + Murder + Frost + HS.Grad, data = statedata)
linStateResp2 = predict(linState2)
sse = sum((linStateResp2 - statedata$Life.Exp)^2)
summary(linState2)
sse

# Pairwise correlations between all variables.
cor(statedata)

# CART regression tree with default controls; plot it and compute SSE.
stateTree = rpart(Life.Exp ~ ., data=statedata)
stateTree
prp(stateTree)
stateTreeResp = predict(stateTree)
sum((stateTreeResp - statedata$Life.Exp)^2)

# Refit with different minimum bucket sizes and compare tree shapes.
stateTree2 = rpart(Life.Exp ~ ., data=statedata, control=rpart.control(minbucket=5))
prp(stateTree)
prp(stateTree2)
prp(rpart(Life.Exp ~ ., data=statedata, control=rpart.control(minbucket=5)))
prp(rpart(Life.Exp ~ ., data=statedata, control=rpart.control(minbucket=20)))
prp(rpart(Life.Exp ~ ., data=statedata, control=rpart.control(minbucket=1)))
stateTreeResp2 = predict(stateTree2)
sum((stateTreeResp2 - statedata$Life.Exp)^2)

# Trees using Area as the only predictor, at two bucket sizes.
stateTree3 = rpart(Life.Exp ~ Area, data=statedata, control=rpart.control(minbucket=5))
stateTreeResp3 = predict(stateTree3)
sum((stateTreeResp3 - statedata$Life.Exp)^2)
stateTree3 = rpart(Life.Exp ~ Area, data=statedata, control=rpart.control(minbucket=1))
stateTreeResp3 = predict(stateTree3)
sum((stateTreeResp3 - statedata$Life.Exp)^2)

# 10-fold cross-validation over cp = 0.01, 0.02, ..., 0.50 with caret.
library(caret)
library(e1071)
set.seed(111)
fitControl = trainControl(method="cv", number=10)
cartGrid = expand.grid(.cp=(1:50)*0.01)
# NOTE(review): `Train` is never defined in this script; the next line can
# only have run against a leftover object in the original session and is
# immediately redone with statedata.
train(Life.Exp ~ ., data=Train, method="rpart", trControl=fitControl, tuneGrid = cartGrid)
train(Life.Exp ~ ., data=statedata, method="rpart", trControl=fitControl, tuneGrid = cartGrid)
# NOTE(review): rpart.control()'s parameter is `cp`, not `.cp`; `.cp=0.12`
# appears to be absorbed by `...` and silently ignored (default cp = 0.01
# used instead) -- presumably why the CARTmodel5/stateTree6 refits near the
# end pass cp= directly to rpart(). TODO confirm against rpart docs.
stateTree5 = rpart(Life.Exp ~ ., data=statedata, control=rpart.control(.cp=0.12))
prp(stateTree5)
stateTreeResp5 = predict(stateTree5)
sum((stateTreeResp5 - statedata$Life.Exp)^2)
stateTreeResp5 = predict(stateTree5, type="response")
stateTreeResp5 = predict(stateTree5)
sum((stateTreeResp5 - statedata$Life.Exp)^2)
# fix() opens an interactive editor -- only meaningful at the console.
fix(stateTreeResp5)
stateTreeResp5
# NOTE(review): stateTreeResp4 is never defined anywhere in this script.
stateTreeResp4
stateTreeResp3
prp(stateTree5)

# Re-run CV with the same seed, then refit at the chosen cp.
set.seed(111)
train(Life.Exp ~ ., data=statedata, method="rpart", trControl=fitControl, tuneGrid = cartGrid)
stateTree5 = rpart(Life.Exp ~ ., data=statedata, control=rpart.control(.cp=0.12))
pred5 = predict(stateTree5)
sum((pred5 - statedata$Life.Exp)^2)

# Same CV procedure for the Area-only model.
set.seed(111)
train(Life.Exp ~ Area, data=statedata, method="rpart", trControl=fitControl, tuneGrid = cartGrid)
stateTree6 = rpart(Life.Exp ~ Area, data=statedata, control=rpart.control(.cp=0.02))
prp(stateTree6)

# Final fits passing cp directly to rpart() (the working form), with SSEs.
CARTmodel5 = rpart(Life.Exp ~ ., data=statedata, cp=0.12)
pred5 = predict(CARTmodel5)
sum((pred5 - statedata$Life.Exp)^2)
stateTree6 = rpart(Life.Exp ~ Area, data=statedata, cp = 0.02)
pred6 = predict(stateTree6)
sum((pred6 - statedata$Life.Exp)^2)
###############DONE###################
|
ccb1d4895b45275af88231aacb2e17e177569f80
|
a2963e83ea2de81ae421d76fa3959139b7f753e8
|
/05.Topic-IV.Project-5.Manufacturer-Retailer-Price.CausalRelationDiscovery.distribution/zctaylor_causality_project.R
|
b8d9800b0a73ed9491521c87dd1828ed41d3ffa5
|
[] |
no_license
|
wuzhongdehua/data-guided-business-intel
|
2bc456fdf492c634e377f54cc3ae242c95ca21b6
|
edca4696acf699fb5118f02d897f17a9692bc45e
|
refs/heads/master
| 2020-12-25T13:23:17.277186
| 2016-05-05T08:40:34
| 2016-05-05T08:40:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,751
|
r
|
zctaylor_causality_project.R
|
# Causal relation discovery between product movement, retailer price and
# manufacturer price (CSC591 project): fit a VAR model, check the residuals
# for stationarity and normality, then run PC / LiNGAM on the residuals.

# Load the libraries
library('vars')
library('urca')
library('pcalg')
library('dgof')
require(dgof)  # redundant: dgof is already attached by library() above
# Read the input data
# NOTE(review): absolute, machine-specific path -- will only run on the
# original author's machine.
data.path <- "/home/zachncst/classes/csc591/projects/05.Topic-IV.Project-5.Manufacturer-Retailer-Price.CausalRelationDiscovery.distribution/Input Data/data.csv"
data.csv <- read.csv(data.path)
summary(data.csv)
# Build a VAR(1) model with a constant term.
# NOTE(review): vars::VAR() documents ic = "AIC"/"HQ"/"SC"/"FPE";
# "Schwartz" is not one of those values (and ic is only consulted when
# lag.max is supplied) -- TODO confirm against the vars documentation.
var.model <- VAR(data.csv, p=1, type="const", ic="Schwartz")
summary(var.model)
var.model
plot(var.model)
# Extract the residuals from the VAR model
# (one column per series: movement, retailer price, manufacturer price).
var.residuals <- residuals(var.model)
var.residuals
# Check for stationarity using the Augmented Dickey-Fuller test
move_dickey <- ur.df(var.residuals[,1])
rprice_dickey <- ur.df(var.residuals[,2])
mprice_dickey <- ur.df(var.residuals[,3])
summary(move_dickey)
summary(rprice_dickey)
summary(mprice_dickey)
# All test statistics are below the critical value -1.95 at the 5 percent
# level (0.05), so the residual series are treated as stationary.
# Check whether the variables follow a Gaussian distribution using
# two-sample Kolmogorov-Smirnov tests against a fresh N(0,1) sample.
# NOTE(review): length() of a matrix is the total element count, so `x`
# is three times longer than each residual column (legal for a two-sample
# KS test, but possibly not intended). Also, ks.test() has no `p.value`
# argument; the value lands in `...` -- the significance level is actually
# applied by the explicit comparisons against 0.05 below. TODO confirm.
x <- rnorm(length(var.residuals))
move_test <- dgof::ks.test(var.residuals[,1], x, p.value=0.05)
rprice_kstest <- ks.test(var.residuals[,2], x, p.value=0.05)
mprice_kstest <- ks.test(var.residuals[,3], x, p.value=0.05)
move_test$p.value < 0.05
rprice_kstest$p.value < 0.05
mprice_kstest$p.value < 0.05
# Write the residuals to a csv file to build causal graphs using the
# Tetrad software.
write.csv(var.residuals, file = "./residuals.csv", row.names = F)
# Used the Tetrad software, but here is the code to reproduce some of it
# in R.
# PC algorithm on the residuals.
# NOTE(review): suffStat hard-codes n=1000; presumably this should be the
# actual number of residual rows -- verify against the data.
data <- var.residuals
suffStat=list(C=cor(data), n=1000)
pc_fit <- pc(suffStat, indepTest=gaussCItest, alpha=0.05, labels=colnames(data), skel.method="original")
pc_fit
plot(pc_fit, main="PC Output")
# LiNGAM algorithm
lingam_fit <- LINGAM(data)
show(lingam_fit)
|
6d9a2476c25a56b9ddc1616841ae56057773e160
|
20ed57666ba391ca68f9c814ec26370b7c7a4797
|
/man/gs_ws_new.Rd
|
bee7b27bc935d5dc8e19b5df12f26f338c4d4582
|
[] |
no_license
|
colinloftin-awhere/googlesheets
|
5c59a241107a39ca2b52442c92e7266d7357100b
|
9a591eef0cd64f10a98371ed95d06f28b4744a27
|
refs/heads/master
| 2021-01-23T03:35:08.902747
| 2017-02-10T16:41:18
| 2017-02-10T16:41:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,466
|
rd
|
gs_ws_new.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gs_ws.R
\name{gs_ws_new}
\alias{gs_ws_new}
\title{Add a new worksheet within a spreadsheet}
\usage{
gs_ws_new(ss, ws_title = "Sheet1", row_extent = 1000, col_extent = 26,
..., verbose = TRUE)
}
\arguments{
\item{ss}{a registered Google spreadsheet, i.e. a \code{\link{googlesheet}}
object}
\item{ws_title}{the title for the new, sole worksheet; if unspecified, the
Google Sheets default is "Sheet1"}
\item{row_extent}{integer for new row extent; if unspecified, the Google
Sheets default is 1000}
\item{col_extent}{integer for new column extent; if unspecified, the Google
Sheets default is 26}
\item{...}{optional arguments passed along to \code{\link{gs_edit_cells}} in
order to populate the new worksheet with data}
\item{verbose}{logical; do you want informative messages?}
}
\value{
a \code{\link{googlesheet}} object
}
\description{
Add a new worksheet to an existing spreadsheet. By default, it will [1] have
1000 rows and 26 columns, [2] contain no data, and [3] be titled "Sheet1".
Use the \code{ws_title}, \code{row_extent}, \code{col_extent}, and \code{...}
arguments to give the worksheet a different title or extent or to populate it
with some data. This function calls the
\href{https://developers.google.com/drive/v2/reference/}{Google Drive API} to
create the worksheet and edit its title or extent. If you provide data for
the sheet, then this function also calls the
\href{https://developers.google.com/google-apps/spreadsheets/}{Google Sheets
API}. The title of the new worksheet cannot be the same as that of any
existing worksheet in the sheet.
}
\details{
We anticipate that \strong{if} the user wants to control the extent of the
new worksheet, it will be by providing input data and specifying `trim =
TRUE` (see \code{\link{gs_edit_cells}}) or by specifying \code{row_extent}
and \code{col_extent} directly. But not both ... although we won't stop you.
In that case, note that explicit worksheet sizing occurs before data
insertion. If data insertion triggers any worksheet resizing, that will
override any usage of \code{row_extent} or \code{col_extent}.
}
\examples{
\dontrun{
# get a copy of the Gapminder spreadsheet
gap_ss <- gs_copy(gs_gap(), to = "Gapminder_copy")
gap_ss <- gs_ws_new(gap_ss)
gap_ss <- gs_ws_delete(gap_ss, ws = "Sheet1")
gap_ss <-
gs_ws_new(gap_ss, ws_title = "Atlantis", input = head(iris), trim = TRUE)
gap_ss
gs_delete(gap_ss)
}
}
|
1b58302734e3b25f1e4d2584008830a180c985ba
|
a11071996d946951e33ca81be9ad1866d1514bb1
|
/cachematrix.R
|
c9928a61f1bcdfd1ea8156a27fa460f04e5e9e14
|
[] |
no_license
|
StanfordB3/datasciencecoursera
|
76f7d35c2a921c7678533b23775fce7f6ac4816c
|
3e8b023ade47786bde16cfec165ba72f05670888
|
refs/heads/master
| 2021-01-10T15:46:14.258445
| 2016-04-18T06:45:50
| 2016-04-18T06:45:50
| 55,576,594
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,782
|
r
|
cachematrix.R
|
# makeCacheMatrix: wrap a matrix together with a cached copy of its inverse.
#
# Bug fixed vs. the original: setMatrix assigned to global variables
# `M`/`mI` and getMatrix returned the free variable `M`, so the constructor
# argument `x` was ignored (a global `M` had to exist beforehand) and the
# cached inverse `m` was never invalidated when the matrix changed. All
# accessors now use the closure environment (`x` and `m`) via `<<-`.
#
# Args:
#   x: the matrix to wrap (defaults to an empty 1x1 NA matrix).
# Returns:
#   A list of four accessor functions:
#     setMatrix(y)        - replace the stored matrix and clear the cache
#     getMatrix()         - return the stored matrix
#     setIMatrix(inverse) - store a computed inverse in the cache
#     getIMatrix()        - return the cached inverse, or NULL if unset
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL  # cached inverse of x; NULL means "not computed yet"
  # Replace the stored matrix. Any previously cached inverse is stale,
  # so the cache is reset to NULL.
  setMatrix <- function(y) {
    x <<- y
    m <<- NULL
  }
  # Return the currently stored matrix.
  getMatrix <- function() x
  # Store a computed inverse in the cache.
  setIMatrix <- function(InverseMatrix) {
    m <<- InverseMatrix
  }
  # Return the cached inverse (NULL if it has not been computed).
  getIMatrix <- function() m
  # Expose the four accessors by name.
  list(setMatrix = setMatrix, getMatrix = getMatrix,
       setIMatrix = setIMatrix,
       getIMatrix = getIMatrix)
}
# cacheSolve: return the inverse of the matrix wrapped by `x` (an object
# created by makeCacheMatrix). A previously cached inverse is reused when
# available; otherwise the inverse is computed with solve(), stored back
# into the cache, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getIMatrix()
  # Early exit: a non-NULL cache entry means the inverse is already known.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: fetch the current matrix, invert it, remember the result.
  current <- x$getMatrix()
  inverse <- solve(current)
  x$setIMatrix(inverse)
  inverse
}
|
6ace857e11977fd56dc5afc1bcd59d9f01b41907
|
3bb7d9054e970ad99dcf8c80fb64633ecdb85deb
|
/src/01-experiment-first_animation.R
|
e38be23c79a4320d2596c24d671d9399374d87e9
|
[] |
no_license
|
jschoeley/rore2021-challenge
|
2dcdbaf5ca906dc5af84ce31249fd5d41e6617d9
|
6056863174148e91068506804529e7a5fbca17fc
|
refs/heads/master
| 2023-06-03T20:49:42.072046
| 2021-06-23T09:39:04
| 2021-06-23T09:39:04
| 379,325,423
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 409
|
r
|
01-experiment-first_animation.R
|
# Animate country rankings of population size
# Jonas Schöley

# tidyverse supplies read_csv()/ggplot2; gganimate supplies
# transition_states().
library(tidyverse)
library(gganimate)

# Simulated ranking data; the columns used below are id, name and value.
rank_sim <- read_csv('./dat/simulated_data.csv')

# Dot plot of value by name (names ordered by descending value), flipped so
# names run along the vertical axis, animated across the `id` states.
rank_fig <-
  ggplot(rank_sim) +
  geom_point(aes(x = reorder(name, -value), y = value)) +
  transition_states(id, transition_length = 2, state_length = 1) +
  coord_flip() +
  labs(x = NULL, y = 'Population size')

# Render the animation.
rank_fig
|
ef1cc9f026152176d5b939d0f059d6c7b9354d5c
|
cb9b42a440276a5ee2b509128b0447991d05ccf7
|
/Machine_learning_main.R
|
3b56abab5d8321b8237ffbd31c69922f47973b1b
|
[] |
no_license
|
lanagarmire/preeclampsa_lipidomics
|
2c4215e894969db49255f9088120afc2b9d223ae
|
121ac4ed7ed5a7d2bf4fe11ee3cf9967e63eab25
|
refs/heads/master
| 2023-01-08T03:08:15.590571
| 2020-11-12T22:22:50
| 2020-11-12T22:22:50
| 276,237,961
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,171
|
r
|
Machine_learning_main.R
|
# Preeclampsia lipidomics: train and evaluate a machine-learning model
# (random forest, via a modified lilikoi pipeline sourced below) on lipid
# profiles, then save the training/testing performance plots as PDFs.
setwd("./")  # NOTE(review): no-op (sets the working dir to itself).
newdat <- readRDS('newlipid_9_13_20.rds')  # lipid data with a Label column
newpd <- readRDS('newpd_pdonly.rds')       # phenotype table
library(gbm)
library(caret)
# Feature table: drop the first column of newdat -- presumably the Label
# column used below; TODO confirm against the RDS contents.
lilikoimat <- newdat[-1]
# Align the phenotype rows to the lipid table's samples.
newpd <- newpd[row.names(newdat),]
# NOTE(review): newdatpd is never used after this point.
newdatpd <- cbind(newdat, newpd)
# Transpose so rows are features and columns are samples.
lilikoimat <- t(lilikoimat)
# Recode labels to the "Cancer"/"Normal" pair the lilikoi code expects.
lilikoilabels <- newdat$Label
lilikoilabels[lilikoilabels == 'Preeclampsia'] <- 'Cancer'
lilikoilabels[lilikoilabels == 'Control'] <- 'Normal'
# Treat every feature (row) as a candidate "pathway" for the pipeline.
lilikoicands <- row.names(lilikoimat)
# Project-local, modified lilikoi machine-learning routine.
source('./machine_learning11.R')
# Random forest, 80/20 split with a fixed seed, repeated 10 times.
lilikoires <- lilikoi.machine_learning11(PDSmatrix = lilikoimat,
                                         measurementLabels = lilikoilabels,
                                         significantPathways = lilikoicands,
                                         selectmod = 'RF',
                                         dividep = 0.8,
                                         dividseed = 1996,
                                         times = 10)
# Save the training-performance plot.
pdf(file = "Training.pdf",width = 8, height = 8)
print(lilikoires$p_training)
dev.off()
# Save the selected-model (test) performance plot.
pdf(file = "Testing.pdf",width = 4, height = 8)
print(lilikoires$p_selected_model)
dev.off()
|
b174b35c265e6e36a1c50ffc071fcb3f8c67593d
|
6f6f97554599532e8345d769f96c9b6e9d2cb943
|
/httk/man/parameterize_steadystate.Rd
|
8604703b0ed9157a9438b684676da69022b24acf
|
[] |
no_license
|
jrsfeir/CompTox-ExpoCast-httk
|
37cbfa4142c261ed79d3141142a613a614b28d38
|
bf2f6c300fe2cf0c538c16355c2ec437ca781c55
|
refs/heads/main
| 2023-06-26T16:56:17.010294
| 2021-05-10T14:53:13
| 2021-05-10T14:53:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,906
|
rd
|
parameterize_steadystate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parameterize_steadystate.R
\name{parameterize_steadystate}
\alias{parameterize_steadystate}
\title{Parameterize_SteadyState}
\usage{
parameterize_steadystate(
chem.cas = NULL,
chem.name = NULL,
dtxsid = NULL,
species = "Human",
clint.pvalue.threshold = 0.05,
default.to.human = FALSE,
human.clint.fup = FALSE,
adjusted.Funbound.plasma = TRUE,
restrictive.clearance = TRUE,
fup.lod.default = 0.005,
suppress.messages = FALSE,
minimum.Funbound.plasma = 1e-04
)
}
\arguments{
\item{chem.cas}{Chemical Abstract Services Registry Number (CAS-RN) -- the
chemical must be identified by either CAS, name, or DTXSID}
\item{chem.name}{Chemical name (spaces and capitalization ignored) -- the
chemical must be identified by either CAS, name, or DTXSID}
\item{dtxsid}{EPA's DSSTox Structure ID (\url{https://comptox.epa.gov/dashboard})
-- the chemical must be identified by either CAS, name, or DTXSID}
\item{species}{Species desired (either "Rat", "Rabbit", "Dog", "Mouse", or
default "Human").}
\item{clint.pvalue.threshold}{Hepatic clearances with clearance assays
having p-values greater than the threshold are set to zero.}
\item{default.to.human}{Substitutes missing rat values with human values if
true.}
\item{human.clint.fup}{Uses human hepatic intrinsic clearance and fraction
of unbound plasma in calculation of partition coefficients for rats if true.}
\item{adjusted.Funbound.plasma}{Returns adjusted Funbound.plasma when set to
TRUE.}
\item{restrictive.clearance}{In calculating hepatic.bioavailability, protein
binding is not taken into account (set to 1) in liver clearance if FALSE.}
\item{fup.lod.default}{Default value used for fraction of unbound plasma for
chemicals where measured value was below the limit of detection. Default
value is 0.005, as shown in the usage section.}
\item{suppress.messages}{Whether or not the output message is suppressed.}
\item{minimum.Funbound.plasma}{Monte Carlo draws less than this value are set
equal to this value (default is 0.0001 -- half the lowest measured Fup in our
dataset).}
}
\value{
\item{Clint}{Hepatic Intrinsic Clearance, uL/min/10^6 cells.}
\item{Fgutabs}{Fraction of the oral dose absorbed, i.e. the fraction of the
dose that enters the gutlumen.}
\item{Funbound.plasma}{Fraction of plasma that is not bound.}
\item{Qtotal.liverc}{Flow rate of blood exiting the liver, L/h/kg BW^3/4.}
\item{Qgfrc}{Glomerular Filtration Rate, L/h/kg
BW^3/4, volume of fluid filtered from kidney and excreted.}
\item{BW}{Body Weight, kg}
\item{MW}{Molecular Weight, g/mol}
\item{million.cells.per.gliver}{Millions cells per gram of liver tissue.}
\item{Vliverc}{Volume of the liver per kg body weight, L/kg BW.}
\item{liver.density}{Liver tissue density, kg/L.}
\item{Fhep.assay.correction}{The fraction of chemical unbound in hepatocyte
assay using the method of Kilford et al. (2008)}
\item{hepatic.bioavailability}{Fraction of dose remaining after first pass
clearance, calculated from the corrected well-stirred model.}
}
\description{
This function initializes the parameters needed in the functions
calc_mc_css, calc_mc_oral_equiv, and calc_analytic_css for the three
compartment steady state model ('3compartmentss').
}
\examples{
parameters <- parameterize_steadystate(chem.name='Bisphenol-A',species='Rat')
parameters <- parameterize_steadystate(chem.cas='80-05-7')
}
\references{
Pearce, Robert G., et al. "Httk: R package for high-throughput
toxicokinetics." Journal of statistical software 79.4 (2017): 1.
Kilford, P. J., Gertz, M., Houston, J. B. and Galetin, A.
(2008). Hepatocellular binding of drugs: correction for unbound fraction in
hepatocyte incubations using microsomal binding or drug lipophilicity data.
Drug Metabolism and Disposition 36(7), 1194-7, 10.1124/dmd.108.020834.
}
\author{
John Wambaugh
}
\keyword{3compss}
\keyword{Parameter}
|
aa6d42f7b23233e9aebcb44eeb11b77824d523bb
|
c51347680754745733293e00aacf7b633334c1fc
|
/R/plot.yphemi.R
|
8deb39fd26baf8ddf74533ce733ca78e89fcf119
|
[] |
no_license
|
cran/YplantQMC
|
771c341d00e410a0e61dbdadc02af8866d5cd198
|
dc62bfc247ba9d6dd92498e8afa00d511a36e00e
|
refs/heads/master
| 2021-01-21T21:47:33.241377
| 2016-05-23T06:34:50
| 2016-05-23T06:34:50
| 17,694,152
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,809
|
r
|
plot.yphemi.R
|
# plot.yphemi: S3 plot method for 'yphemi' hemispherical-photo objects.
# Draws the hemisphere as an azimuth x altitude grid of tiles shaded by
# gap fraction (0 = closed/black, 1 = open/white) and, when a met object
# is supplied, overlays the solar path for that day/location.
#
# Arguments:
#   x          - a 'yphemi' object; the code below reads its naz, nalt,
#                azbins, altbins (radians) and m (nalt x naz matrix of
#                gap fractions).
#   met        - optional met object providing year/month/day, location
#                (lat/long/tzlong), sunrise/sunset and dat (per-timestep
#                timeofday/altitude/azimuth); NULL skips the solar path.
#   sungap     - if TRUE, also draw per-timestep sun markers sized by the
#                gap fraction toward the sun (from evalHemi()).
#   projection - "iso": radius decreases linearly with altitude;
#                "flat": radius = cos(altitude).
#   warn       - warn when the solar path is probably not visible.
#   bordercol  - colour of the tile borders.
#' @method plot yphemi
#'@S3method plot yphemi
#'@rdname setHemi
plot.yphemi <- function(x,met=NULL,sungap=TRUE,
  projection=c("iso","flat"),warn=TRUE,bordercol='black', ...){
  projection <- match.arg(projection)
  hemi <- x
  # Save the full graphics state and restore it on exit; square plot region
  # so the hemisphere circle is not distorted.
  o <- par(no.readonly=TRUE)
  on.exit(par(o))
  par(pty='s')
  # Blank canvas slightly larger than the unit circle, leaving room for the
  # N/E/S/W labels placed at radius 1.1 below.
  plot(1, type='n',
       axes=FALSE, ann=FALSE,xlim=c(-1.15,1.15),ylim=c(-1.15,1.15))
  # Plot the tiles of the hemiphoto. Each tile is an annular sector spanning
  # one azimuth bin by one altitude bin; each arc is approximated with 25
  # points and the polygon is filled with a grey level equal to the tile's
  # gap fraction. Azimuth 0 points north (up) since x = r*sin, y = r*cos.
  for(i in 1:hemi$naz){
    for(j in 1:hemi$nalt){
      angs <- seq(hemi$azbins[i], hemi$azbins[i+1], length=25)
      # Inner/outer arc radius depends on the projection.
      if(projection == "flat"){
        r1 <- cos(hemi$altbins[j])
        r2 <- cos(hemi$altbins[j+1])
      }
      if(projection == "iso"){
        r1 <- 1 - hemi$altbins[j] / (pi/2)
        r2 <- 1 - hemi$altbins[j+1] / (pi/2)
      }
      x1 <- r1 * sin(angs)
      y1 <- r1 * cos(angs)
      x2 <- r2 * sin(angs)
      y2 <- r2 * cos(angs)
      # Outline: outer arc forward, inner arc reversed, closing the sector.
      xx <-c(x1,rev(x2))
      yy <-c(y1,rev(y2))
      polygon(xx,yy,col=grey(hemi$m[j, i]), border=bordercol)
    }
  }
  # Solar path.
  if(!is.null(met)){
    # Calculate solar az, alt for 101 timesteps between 10 minutes after
    # sunrise and 10 minutes before sunset (used for the path line).
    hrs <- seq(met$sunrise+10/60, met$sunset-10/60, length=101)
    sunpos <- zenaz(met$year, met$month, met$day,
                    met$location$lat, met$location$long, met$location$tzlong,
                    timeofday=hrs)
    # NOTE(review): sunpos2 is computed but never used below; the met
    # timestep positions are taken from met$dat$altitude/azimuth directly.
    sunpos2 <- zenaz(met$year, met$month, met$day,
                     met$location$lat, met$location$long, met$location$tzlong,
                     timeofday=met$dat$timeofday)
    ALT <- sunpos$altitude
    AZ <- sunpos$azimuth
    # Project the sun positions with the same mapping as the tiles; note the
    # degree-based form of the "iso" radius (1 - alt/90) versus the
    # radian-based form above. sX/sY trace the path; sunX/sunY are the
    # met-timestep markers.
    if(projection == "flat"){
      sX <- cos(ALT*pi/180) * sin(AZ*pi/180)
      sY <- cos(ALT*pi/180) * cos(AZ*pi/180)
      sunX <- cos(met$dat$altitude*pi/180) * sin(met$dat$azimuth*pi/180)
      sunY <- cos(met$dat$altitude*pi/180) * cos(met$dat$azimuth*pi/180)
    }
    if(projection == "iso"){
      r <- 1 - ALT / 90
      sX <- r * sin(AZ*pi/180)
      sY <- r * cos(AZ*pi/180)
      rs <- 1 - met$dat$altitude / 90
      sunX <- rs * sin(met$dat$azimuth*pi/180)
      sunY <- rs * cos(met$dat$azimuth*pi/180)
    }
    # Gap fraction in the sun's direction at each met timestep (computed
    # even when sungap=FALSE, though only used in the TRUE branch).
    gapfracdir <- evalHemi(hemi, met=met)$gapfraction
    if(sungap){
      # Path line plus sun markers scaled by directional gap fraction.
      points(sX, sY, col="darkorange2", pch=19, type='l')
      points(sunX, sunY, col="darkorange2", pch=19, cex=3*gapfracdir)
      if(max(gapfracdir,na.rm=TRUE) < 0.1 && warn)
        warning("Low gap fraction : solar path probably not visible. Try: sungap=FALSE.")
    } else {
      points(sX, sY, type='l', col="darkorange2")
    }
  }
  # Add circle; labels; legend.
  # NOTE: `x` is reused here for the circle coordinates, shadowing the
  # function argument (already copied to `hemi`, so this is harmless).
  angs <- seq(0, 2*pi, length=101)
  x <- sin(angs)
  y <- cos(angs)
  points(x,y, type='l')
  text(0,1.1,expression(bold(N)))
  text(1.1,0,expression(bold(E)))
  text(0,-1.1,expression(bold(S)))
  text(-1.1,0,expression(bold(W)))
  # Grey-scale legend mapping fill level to gap fraction.
  fracs <- c(0.0,0.25,0.5,0.75, 1.0)
  legend("bottomleft", as.character(fracs),
         fill=grey(fracs),cex=0.8,
         y.intersp=1.0, title="Gap fraction")
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.