content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Analysis of tissue-microarray (TMA) scores for five genes against
# clinicopathological variables; all printed results go to a text sink.
# NOTE(review): rm(list = ls()) wipes the caller's workspace -- generally
# discouraged in scripts; kept as-is here.
rm(list = ls())
library('ggplot2')
library('data.table')
# sinks
# pdf("./sink/genesTMAanalysis.pdf")
# Everything printed below is diverted to this file until the sink is
# closed; assumes ./sink/ exists under the working directory -- TODO confirm.
sink("./sink/genesTMAanalysis.txt")
options(digits=3)
# tma score for genes SPAG5, SFRSF, CACNG4, CHD1, GA13
TMAscorePath= file.path(getwd(), 'data', 'TMAtable.txt')
# "NA", "N/A", "" and "N" are all treated as missing; strings stay character.
DT <- data.table(read.table(TMAscorePath, header=TRUE, sep = "\t",
na.strings=c("NA", "N/A", "", "N"), stringsAsFactors=FALSE, strip.white=TRUE))
# ID = TMA block + column + row identifies a core; raw headers
# (CACNG4..NvsC, SFRS7.percent, GA13) are renamed to gene symbols.
geneScore_DT <- DT[, .(ID=paste(TMA, Column, Row, sep=''), SPAG5, CACNG4 = CACNG4..NvsC, SFRS7 = SFRS7.percent, CHD1, GNA13 = GA13)]
TumorStat_DT <- DT[, .(ID=paste(TMA, Column, Row, sep=''), GRADE.nor, SIZE.nor, LVI, LN, ER, PR, HER.2, Type)]
# Merge gene scores with tumor characteristics on core ID; DF is the
# data.frame view used for lapply over gene columns later on.
DT <- merge(geneScore_DT, TumorStat_DT, by='ID')
DF <- data.frame(DT)
applyChiSquaredTests <- function(x){
  # Chi-squared tests of independence between one gene's TMA score vector
  # `x` and each clinicopathological variable held in the global `DT`.
  # NOTE(review): relies on `DT` being row-aligned with `x` (both come from
  # the same merged table) -- confirm before reusing outside this script.
  #
  # Args:
  #   x: vector of scores for a single gene (one column of DF[, 2:6]).
  # Returns:
  #   Named numeric vector of chi-squared p-values, one per clinical
  #   variable, in the same order/names as the original implementation.
  clinVars <- c(grade = 'GRADE.nor', Size = 'SIZE.nor', LVI = 'LVI',
                LN = 'LN', ER = 'ER', PR = 'PR', HER.2 = 'HER.2')
  # vapply keeps the names of clinVars and guarantees a numeric result.
  vapply(clinVars, function(v){
    tbl <- table(gene = x, var = DT[[v]], useNA = 'no')
    # summary.table() performs a chi-squared test of independence and
    # exposes its p-value (same computation the repeated stanzas did).
    summary(tbl)$p.value
  }, numeric(1))
}
applyFisherTests <- function(x){
  # Fisher's exact tests between one gene's TMA score vector `x` and each
  # clinicopathological variable held in the global `DT`.
  # NOTE(review): relies on `DT` being row-aligned with `x` (both come from
  # the same merged table) -- confirm before reusing outside this script.
  #
  # Args:
  #   x: vector of scores for a single gene (one column of DF[, 2:6]).
  # Returns:
  #   Named numeric vector of Fisher exact-test p-values, one per clinical
  #   variable, in the same order/names as the original implementation.
  clinVars <- c(grade = 'GRADE.nor', Size = 'SIZE.nor', LVI = 'LVI',
                LN = 'LN', ER = 'ER', PR = 'PR', HER.2 = 'HER.2')
  # vapply keeps the names of clinVars and guarantees a numeric result.
  vapply(clinVars, function(v){
    tbl <- table(gene = x, var = DT[[v]], useNA = 'no')
    fisher.test(tbl)$p.value
  }, numeric(1))
}
# Run both test batteries over the five gene-score columns (DF[, 2:6]);
# each lapply call yields one column of p-values per gene.
chisq.tbl <- data.frame(lapply(DF[,2:6], applyChiSquaredTests))
fisher.tbl <- data.frame(lapply(DF[,2:6], applyFisherTests))
cat('\n\nchi squared test p-value table\n\n')
print(chisq.tbl)
cat('\n\nfisher test p-value table\n\n')
print(fisher.tbl)
# BUG FIX: the script opened a text sink (sink(...) above) but never a
# graphics device -- the pdf() call is commented out -- so dev.off() would
# error with "cannot shut down device 1 (the null device)" and leave the
# sink open. Close the sink so output is flushed and the console restored.
sink()
| /ListOfGene/init.R | no_license | tt6746690/LearnR | R | false | false | 2,957 | r | rm(list = ls())
# (Duplicate copy of the TMA analysis preamble.)
# Loads TMA scores and merges gene columns with tumor characteristics;
# all printed results go to a text sink.
library('ggplot2')
library('data.table')
# sinks
# pdf("./sink/genesTMAanalysis.pdf")
# Everything printed below is diverted to this file until the sink is
# closed; assumes ./sink/ exists under the working directory -- TODO confirm.
sink("./sink/genesTMAanalysis.txt")
options(digits=3)
# tma score for genes SPAG5, SFRSF, CACNG4, CHD1, GA13
TMAscorePath= file.path(getwd(), 'data', 'TMAtable.txt')
# "NA", "N/A", "" and "N" are all treated as missing; strings stay character.
DT <- data.table(read.table(TMAscorePath, header=TRUE, sep = "\t",
na.strings=c("NA", "N/A", "", "N"), stringsAsFactors=FALSE, strip.white=TRUE))
# ID = TMA block + column + row identifies a core; raw headers
# (CACNG4..NvsC, SFRS7.percent, GA13) are renamed to gene symbols.
geneScore_DT <- DT[, .(ID=paste(TMA, Column, Row, sep=''), SPAG5, CACNG4 = CACNG4..NvsC, SFRS7 = SFRS7.percent, CHD1, GNA13 = GA13)]
TumorStat_DT <- DT[, .(ID=paste(TMA, Column, Row, sep=''), GRADE.nor, SIZE.nor, LVI, LN, ER, PR, HER.2, Type)]
# Merge gene scores with tumor characteristics on core ID.
DT <- merge(geneScore_DT, TumorStat_DT, by='ID')
DF <- data.frame(DT)
applyChiSquaredTests <- function(x){
  # Chi-squared tests of independence between one gene's TMA score vector
  # `x` and each clinicopathological variable held in the global `DT`.
  # NOTE(review): relies on `DT` being row-aligned with `x` (both come from
  # the same merged table) -- confirm before reusing outside this script.
  #
  # Args:
  #   x: vector of scores for a single gene (one column of DF[, 2:6]).
  # Returns:
  #   Named numeric vector of chi-squared p-values, one per clinical
  #   variable, in the same order/names as the original implementation.
  clinVars <- c(grade = 'GRADE.nor', Size = 'SIZE.nor', LVI = 'LVI',
                LN = 'LN', ER = 'ER', PR = 'PR', HER.2 = 'HER.2')
  # vapply keeps the names of clinVars and guarantees a numeric result.
  vapply(clinVars, function(v){
    tbl <- table(gene = x, var = DT[[v]], useNA = 'no')
    # summary.table() performs a chi-squared test of independence and
    # exposes its p-value (same computation the repeated stanzas did).
    summary(tbl)$p.value
  }, numeric(1))
}
applyFisherTests <- function(x){
  # Fisher's exact tests between one gene's TMA score vector `x` and each
  # clinicopathological variable held in the global `DT`.
  # NOTE(review): relies on `DT` being row-aligned with `x` (both come from
  # the same merged table) -- confirm before reusing outside this script.
  #
  # Args:
  #   x: vector of scores for a single gene (one column of DF[, 2:6]).
  # Returns:
  #   Named numeric vector of Fisher exact-test p-values, one per clinical
  #   variable, in the same order/names as the original implementation.
  clinVars <- c(grade = 'GRADE.nor', Size = 'SIZE.nor', LVI = 'LVI',
                LN = 'LN', ER = 'ER', PR = 'PR', HER.2 = 'HER.2')
  # vapply keeps the names of clinVars and guarantees a numeric result.
  vapply(clinVars, function(v){
    tbl <- table(gene = x, var = DT[[v]], useNA = 'no')
    fisher.test(tbl)$p.value
  }, numeric(1))
}
# Run both test batteries over the five gene-score columns (DF[, 2:6]);
# each lapply call yields one column of p-values per gene.
chisq.tbl <- data.frame(lapply(DF[,2:6], applyChiSquaredTests))
fisher.tbl <- data.frame(lapply(DF[,2:6], applyFisherTests))
cat('\n\nchi squared test p-value table\n\n')
print(chisq.tbl)
cat('\n\nfisher test p-value table\n\n')
print(fisher.tbl)
# BUG FIX: the script opened a text sink (sink(...) above) but never a
# graphics device -- the pdf() call is commented out -- so dev.off() would
# error with "cannot shut down device 1 (the null device)" and leave the
# sink open. Close the sink so output is flushed and the console restored.
sink()
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% 901.MatlabServer.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{1. The MATLAB server running in MATLAB}
\alias{1. The MATLAB server running in MATLAB}
\title{1. The MATLAB server running in MATLAB}
\description{
  This section gives additional details on the MATLAB server.
At the end, the MatlabServer.m script and the
InputStreamByteWrapper.java code is shown.
}
\section{Starting the MATLAB server on Windows}{
  Note that you "cannot prevent MATLAB from creating a window when
  starting on Windows systems, but you can force the window to be hidden"
  by using the option -minimize.
See \url{http://www.mathworks.com/support/solutions/data/1-16B8X.html}
for more information.
}
\section{MatlabServer.m script}{
\preformatted{
\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%
\% MatlabServer
\%
\% This scripts starts a minimalistic MATLAB "server".
\%
\% When started, the server listens for connections at port 9999 or the
\% port number specified by the environment variable 'MATLABSERVER_PORT'.
\%
\% Troubleshooting: If not working out of the box, add this file to the
\% MATLAB path. Make sure InputStreamByteWrapper.class is in the same
\% directory as this file!
\%
\% Requirements:
\% This requires MATLAB with Java support, i.e. MATLAB v6 or higher.
\%
\% Author: Henrik Bengtsson, 2002-2014
\%
\% References:
\% [1] http://www.mathworks.com/access/helpdesk/help/techdoc/
\% matlab_external/ch_jav34.shtml#49439
\% [2] http://staff.science.uva.nl/~horus/dox/horus2.0/user/
\% html/n_installUnix.html
\% [3] http://www.mathworks.com/access/helpdesk/help/toolbox/
\% modelsim/a1057689278b4.html
\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%
disp('Running MatlabServer v3.0.2');
\% addpath R/R_LIBS/linux/library/R.matlab/misc/
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\% MATLAB version-dependent setup
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\% Identify major version of Matlab
hasMajor = eval('length(regexp(version, ''^[0-9]'')) ~= 0', '0');
if (hasMajor)
verParts = sscanf(version, '\%d.');
verMajor = verParts(1);
else
verMajor = -1;
end
if (verMajor < 6)
\% Java is not available/supported
error('MATLAB v5.x and below is not supported.');
elseif (verMajor == 6)
disp('MATLAB v6.x detected.');
\% Default save option
saveOption = '';
\% In MATLAB v6 only the static Java CLASSPATH is supported. It is
\% specified by a 'classpath.txt' file. The default one can be found
\% by which('classpath.txt'). If a 'classpath.txt' exists in the
\% current(!) directory (that MATLAB is started from), it *replaces*
\% the global one. Thus, it is not possible to add additional paths;
\% the global ones has to be copied to the local 'classpath.txt' file.
\%
\% To do the above automatically from R, does not seem to be an option.
else
disp('MATLAB v7.x or higher detected.');
\% MATLAB v7 and above saves compressed files, which is not recognized
\% by R.matlab's readMat(); force saving in old format.
saveOption = '-V6';
disp('Saving with option -V6.');
\% In MATLAB v7 and above both static and dynamic Java CLASSPATH:s exist.
\% Using dynamic ones, it is possible to add the file
\% InputStreamByteWrapper.class to CLASSPATH, given it is
\% in the same directory as this script.
javaaddpath({fileparts(which('MatlabServer'))});
disp('Added InputStreamByteWrapper to dynamic Java CLASSPATH.');
end
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\% Import Java classes
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
import java.io.*;
import java.net.*;
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\% If an old MATLAB server is running, close it
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\% If a server object exists from a previous run, close it.
if (exist('server'))
close(server);
clear server;
end
\% If an input stream exists from a previous run, close it.
if (exist('is'))
close(is);
clear is;
end
\% If an output stream exists from a previous run, close it.
if (exist('os'))
close(os);
clear os;
end
fprintf(1, '----------------------\n');
fprintf(1, 'MATLAB server started!\n');
fprintf(1, '----------------------\n');
fprintf(1, 'MATLAB working directory: \%s\n', pwd);
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\% Initiate server socket to which clients may connect
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
port = getenv('MATLABSERVER_PORT');
if (length(port) > 0)
port = str2num(port);
else
\% Try to open a server socket on port 9999
port = 9999;
end
\% Ports 1-1023 are reserved for the Internet Assigned Numbers Authority.
\% Ports 49152-65535 are dynamic ports for the OS. [3]
if (port < 1023 | port > 65535)
error('Cannot not open connection. Port (''MATLABSERVER_PORT'') is out of range [1023,65535]: \%d', port);
end
fprintf(1, 'Trying to open server socket (port \%d)...', port);
server = java.net.ServerSocket(port);
fprintf(1, 'done.\n');
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\% Wait for client to connect
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\% Create a socket object from the ServerSocket to listen and accept
\% connections.
\% Open input and output streams
\% Wait for the client to connect
fprintf(1, 'Waiting for client to connect (port \%d)...', port);
clientSocket = accept(server);
fprintf(1, 'connected.\n');
\% ...client connected.
is = java.io.DataInputStream(getInputStream(clientSocket));
\%is = java.io.BufferedReader(InputStreamReader(is0));
os = java.io.DataOutputStream(getOutputStream(clientSocket));
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\% The MATLAB server state machine
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\% Commands
commands = {'eval', 'send', 'receive', 'send-remote', 'receive-remote', 'echo'};
lasterr = [];
variables = [];
\% As long as we receive data, echo that data back to the client.
state = 0;
while (state >= 0),
if (state == 0)
cmd = readByte(is);
fprintf(1, 'Received cmd: \%d\n', cmd);
if (cmd < -1 | cmd > length(commands))
fprintf(1, 'Unknown command code: \%d\n', cmd);
else
state = cmd;
end
\%-------------------
\% 'eval'
\%-------------------
elseif (state == strmatch('eval', commands, 'exact'))
bfr = char(readUTF(is));
fprintf(1, '"eval" string: "\%s"\n', bfr);
try
eval(bfr);
writeByte(os, 0);
fprintf(1, 'Sent byte: \%d\n', 0);
flush(os);
catch
fprintf(1, 'EvaluationException: \%s\n', lasterr);
writeByte(os, -1);
fprintf(1, 'Sent byte: \%d\n', -1);
writeUTF(os, lasterr);
fprintf(1, 'Sent UTF: \%s\n', lasterr);
flush(os);
end
flush(os);
state = 0;
\%-------------------
\% 'send'
\%-------------------
elseif (state == strmatch('send', commands, 'exact'))
tmpname = sprintf('\%s.mat', tempname);
expr = sprintf('save(tmpname, ''\%s''', saveOption);
ok = 1;
for k=1:length(variables),
variable = variables{k};
if (exist(variable) ~= 1)
lasterr = sprintf('Variable ''\%s'' not found.', variable);
disp(lasterr);
ok = 0;
break;
end;
expr = sprintf('\%s, ''\%s''', expr, variable);
end;
expr = sprintf('\%s)', expr);
if (~ok)
writeInt(os, -1);
writeUTF(os, lasterr);
else
disp(expr);
eval(expr);
writeUTF(os, tmpname);
end
answer = readByte(is);
fprintf('answer=\%d\n', answer);
state = 0;
\%-------------------
\% 'send-remote'
\%-------------------
elseif (state == strmatch('send-remote', commands, 'exact'))
tmpname = sprintf('\%s.mat', tempname);
expr = sprintf('save(tmpname, ''\%s''', saveOption);
ok = 1;
for k=1:length(variables),
variable = variables{k};
if (exist(variable) ~= 1)
lasterr = sprintf('Variable ''\%s'' not found.', variable);
disp(lasterr);
ok = 0;
break;
end;
expr = sprintf('\%s, ''\%s''', expr, variable);
end;
expr = sprintf('\%s)', expr);
if (~ok)
writeInt(os, -1);
writeUTF(os, lasterr);
else
disp(expr);
eval(expr);
file = java.io.File(tmpname);
maxLength = length(file);
clear file;
writeInt(os, maxLength);
fprintf(1, 'Send int: \%d (maxLength)\n', maxLength);
fid = fopen(tmpname, 'r');
count = 1;
while (count ~= 0)
[bfr, count] = fread(fid, 65536, 'int8');
if (count > 0)
write(os, bfr);
\% fprintf(1, 'Wrote \%d byte(s).\n', length(bfr));
end;
end;
fclose(fid);
\% fprintf(1, 'Wrote!\n');
fprintf(1, 'Send buffer: \%d bytes.\n', maxLength);
delete(tmpname);
clear bfr, count, maxLength, fid, tmpname;
end
flush(os);
answer = readByte(is);
fprintf('answer=\%d\n', answer);
state = 0;
\%-------------------
\% 'receive-remote'
\%-------------------
elseif (state == strmatch('receive-remote', commands, 'exact'))
len = readInt(is);
fprintf(1, 'Will read MAT file structure of length: \%d bytes.\n', len);
reader = InputStreamByteWrapper(4096);
bfr = [];
count = 1;
while (len > 0 & count > 0)
count = reader.read(is, min(4096, len));
if (count > 0)
bfr = [bfr; reader.bfr(1:count)];
len = len - count;
end;
end;
clear reader count len;
tmpfile = sprintf('\%s.mat', tempname);
\% tmpfile = 'tmp2.mat';
\% disp(bfr');
\% disp(tmpfile);
fh = fopen(tmpfile, 'wb');
fwrite(fh, bfr, 'int8');
fclose(fh);
clear fh, bfr;
load(tmpfile);
delete(tmpfile);
clear tmpfile;
writeByte(os, 0);
state = 0;
\%-------------------
\% 'receive'
\%-------------------
elseif (state == strmatch('receive', commands, 'exact'))
filename = char(readUTF(is));
fprintf(1, 'Will read MAT file: "\%s"\n', filename);
load(filename);
clear filename;
writeByte(os, 0);
state = 0;
end
end
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\% Shutting down the MATLAB server
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fprintf(1, '-----------------------\n');
fprintf(1, 'MATLAB server shutdown!\n');
fprintf(1, '-----------------------\n');
writeByte(os, 0);
close(os);
close(is);
close(server);
\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%
\% HISTORY:
\% 2014-06-23 [v3.0.2]
\% o ROBUSTNESS: Variables 'lasterr' and 'variables' are now always
\% defined. Potential bug spotted by Steven Jaffe at Morgan Stanley.
\% o Added more progress/verbose output, e.g. current working directory.
\% 2014-01-21 [v2.2.0]
\% o BUG FIX: The MatlabServer.m script would incorrectly consider
\% Matlab v8 and above as Matlab v6. Thanks to Frank Stephen at NREL
\% for reporting on this and providing a patch.
\% 2013-07-11 [v1.3.5]
\% o Updated messages to use 'MATLAB' instead of 'Matlab'.
\% 2010-10-25 [v1.3.4]
\% o BUG FIX: The MatlabServer.m script incorrectly referred to the
\% InputStreamByteWrapper class as java.io.InputStreamByteWrapper.
\% Thanks Kenvor Cothey at GMO LCC for reporting on this.
\% 2010-08-28
\% o Now the MatlabServer script reports its version when started.
\% 2010-08-27
\% o BUG FIX: Now MatlabServer.m saves variables using the function form,
\% i.e. save(). This solves the problem of having single quotation marks
\% in the pathname. Thanks Michael Q. Fan at NC State University for
\% reporting this problem.
\% 2009-08-25
\% o BUG FIX: Started to get the error "Undefined function or method
\% 'ServerSocket' for input arguments of type 'double'.". It seems like
\% import java.net.* etc does not work. A workaround is to specify the
\% full path for all Java classes, e.g. java.net.ServerSocket.
\% Thanks Nicolas Stadler for reporting this issue.
\% 2006-12-28
\% o Extended the accepted range of ports from [1023,49151] to [1023,65535].
\% 2006-05-08
\% o BUG FIX: The error message string for reporting port out of range
\% was invalid and gave the error '... Line: 109 Column: 45 ")" expected,
\% "identifier" found.'. Thanks Alexander Nervedi for reporting this.
\% 2006-01-21
\% o Now an error is thrown if port number is out of (safe) range.
\% o Added option to specify the port number via the system environment
\% variable MATLABSERVER_PORT, after request by Wang Yu, Iowa State Univ.
\% 2005-03-08
\% o BUG FIX: substring() is not recognized by MATLAB v7. Using regexp()
\% which works in MATLAB 6.5 and 7. Workaround eval('try', 'catch').
\% Thanks Patrick Drechsler, University of Wuerzburg for the bug report.
\% 2005-02-24
\% o Now the dynamic Java classpath is set for MATLAB v7 or higher. This
\% will simplify life for MATLAB v7 users.
\% 2005-02-22
\% o Added javaaddpath() to include InputStreamByteWrapper.class.
\% Thanks Yichun Wei for feedback and great suggestions.
\% 2005-02-11
\% o If MATLAB v7 or higher is detected, all MAT structures are saved with
\% option '-V6' so readMat() in R.matlab can read them.
\% 2002-09-02 [or maybe a little bit earlier]
\% o Created.
\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%
}\emph{}
}
\section{InputStreamByteWrapper.(class|java) script}{
The Java class InputStreamByteWrapper is needed in order for MATLAB to
\emph{receive} \emph{data} via a data stream. \R sends data via a data
stream if, and only if, the connection was setup for "remote"
communication, that is, with argument \code{remote=TRUE}.
\preformatted{
import java.io.*;
/*********************************************************************
\% Compile from within MATLAB with:
\% !javac InputStreamByteWrapper.java
\% MATLAB example that reads a file using Java code and writes it
\% back to a temporary file using MATLAB code. Finally the contents
\% of the new file is displayed.
reader = InputStreamByteWrapper; \% Default buffer size is 4096 bytes.
in = java.io.FileInputStream('InputStreamByteWrapper.java');
bfr = [];
len = 1;
while (len > 0)
len = reader.read(in, 16); \% Read 16 bytes at the time (offset=0).
if (len > 0)
bfr = [bfr; reader.bfr(1:len)]; \% Add bytes to my MATLAB buffer.
end
end
close(in);
clear in, reader;
disp(bfr');
tmpfile = tempname;
fh = fopen(tmpfile, 'wb');
fwrite(fh, bfr, 'char');
fclose(fh);
type(tmpfile);
*********************************************************************/
public class InputStreamByteWrapper {
public static byte[] bfr = null;
public InputStreamByteWrapper(int capasity) {
bfr = new byte[capasity];
}
public InputStreamByteWrapper() {
this(4096);
}
public int read(InputStream in, int offset, int length) throws IOException {
return in.read(bfr, offset, length);
}
public int read(InputStream in, int length) throws IOException {
return read(in, 0, length);
}
public int read(InputStream in) throws IOException {
return in.read(bfr);
}
}
/*********************************************************************
HISTORY:
2013-07-11
o Updated comments to use 'MATLAB' instead of 'Matlab'.
2002-09-02 [or maybe a little bit earlier]
o Created.
*********************************************************************/
}\emph{}
}
\keyword{documentation}
| /man/1._The_MATLAB_server_running_in_MATLAB.Rd | no_license | ajauhri/R.matlab | R | false | false | 16,369 | rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% 901.MatlabServer.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{1. The MATLAB server running in MATLAB}
\alias{1. The MATLAB server running in MATLAB}
\title{1. The MATLAB server running in MATLAB}
\description{
This section gives addition details on the MATLAB server.
At the end, the MatlabServer.m script and the
InputStreamByteWrapper.java code is shown.
}
\section{Starting the MATLAB server on Windows}{
Note that you "cannot prevent MATLAB from creating a window when
starting on Windows systems, but you can force the window to be hidden,
by using " the option -minimize.
See \url{http://www.mathworks.com/support/solutions/data/1-16B8X.html}
for more information.
}
\section{MatlabServer.m script}{
\preformatted{
\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%
\% MatlabServer
\%
\% This scripts starts a minimalistic MATLAB "server".
\%
\% When started, the server listens for connections at port 9999 or the
\% port number specified by the environment variable 'MATLABSERVER_PORT'.
\%
\% Troubleshooting: If not working out of the box, add this will to the
\% MATLAB path. Make sure InputStreamByteWrapper.class is in the same
\% directory as this file!
\%
\% Requirements:
\% This requires MATLAB with Java support, i.e. MATLAB v6 or higher.
\%
\% Author: Henrik Bengtsson, 2002-2014
\%
\% References:
\% [1] http://www.mathworks.com/access/helpdesk/help/techdoc/
\% matlab_external/ch_jav34.shtml#49439
\% [2] http://staff.science.uva.nl/~horus/dox/horus2.0/user/
\% html/n_installUnix.html
\% [3] http://www.mathworks.com/access/helpdesk/help/toolbox/
\% modelsim/a1057689278b4.html
\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%
disp('Running MatlabServer v3.0.2');
\% addpath R/R_LIBS/linux/library/R.matlab/misc/
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\% MATLAB version-dependent setup
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\% Identify major version of Matlab
hasMajor = eval('length(regexp(version, ''^[0-9]'')) ~= 0', '0');
if (hasMajor)
verParts = sscanf(version, '\%d.');
verMajor = verParts(1);
else
verMajor = -1;
end
if (verMajor < 6)
\% Java is not available/supported
error('MATLAB v5.x and below is not supported.');
elseif (verMajor == 6)
disp('MATLAB v6.x detected.');
\% Default save option
saveOption = '';
\% In MATLAB v6 only the static Java CLASSPATH is supported. It is
\% specified by a 'classpath.txt' file. The default one can be found
\% by which('classpath.txt'). If a 'classpath.txt' exists in the
\% current(!) directory (that MATLAB is started from), it *replaces*
\% the global one. Thus, it is not possible to add additional paths;
\% the global ones has to be copied to the local 'classpath.txt' file.
\%
\% To do the above automatically from R, does not seem to be an option.
else
disp('MATLAB v7.x or higher detected.');
\% MATLAB v7 and above saves compressed files, which is not recognized
\% by R.matlab's readMat(); force saving in old format.
saveOption = '-V6';
disp('Saving with option -V6.');
\% In MATLAB v7 and above both static and dynamic Java CLASSPATH:s exist.
\% Using dynamic ones, it is possible to add the file
\% InputStreamByteWrapper.class to CLASSPATH, given it is
\% in the same directory as this script.
javaaddpath({fileparts(which('MatlabServer'))});
disp('Added InputStreamByteWrapper to dynamic Java CLASSPATH.');
end
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\% Import Java classes
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
import java.io.*;
import java.net.*;
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\% If an old MATLAB server is running, close it
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\% If a server object exists from a previous run, close it.
if (exist('server'))
close(server);
clear server;
end
\% If an input stream exists from a previous run, close it.
if (exist('is'))
close(is);
clear is;
end
\% If an output stream exists from a previous run, close it.
if (exist('os'))
close(os);
clear os;
end
fprintf(1, '----------------------\n');
fprintf(1, 'MATLAB server started!\n');
fprintf(1, '----------------------\n');
fprintf(1, 'MATLAB working directory: \%s\n', pwd);
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\% Initiate server socket to which clients may connect
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
port = getenv('MATLABSERVER_PORT');
if (length(port) > 0)
port = str2num(port);
else
\% Try to open a server socket on port 9999
port = 9999;
end
\% Ports 1-1023 are reserved for the Internet Assigned Numbers Authority.
\% Ports 49152-65535 are dynamic ports for the OS. [3]
if (port < 1023 | port > 65535)
error('Cannot not open connection. Port (''MATLABSERVER_PORT'') is out of range [1023,65535]: \%d', port);
end
fprintf(1, 'Trying to open server socket (port \%d)...', port);
server = java.net.ServerSocket(port);
fprintf(1, 'done.\n');
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\% Wait for client to connect
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\% Create a socket object from the ServerSocket to listen and accept
\% connections.
\% Open input and output streams
\% Wait for the client to connect
fprintf(1, 'Waiting for client to connect (port \%d)...', port);
clientSocket = accept(server);
fprintf(1, 'connected.\n');
\% ...client connected.
is = java.io.DataInputStream(getInputStream(clientSocket));
\%is = java.io.BufferedReader(InputStreamReader(is0));
os = java.io.DataOutputStream(getOutputStream(clientSocket));
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\% The MATLAB server state machine
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\% Commands
commands = {'eval', 'send', 'receive', 'send-remote', 'receive-remote', 'echo'};
lasterr = [];
variables = [];
\% As long as we receive data, echo that data back to the client.
state = 0;
while (state >= 0),
if (state == 0)
cmd = readByte(is);
fprintf(1, 'Received cmd: \%d\n', cmd);
if (cmd < -1 | cmd > length(commands))
fprintf(1, 'Unknown command code: \%d\n', cmd);
else
state = cmd;
end
\%-------------------
\% 'eval'
\%-------------------
elseif (state == strmatch('eval', commands, 'exact'))
bfr = char(readUTF(is));
fprintf(1, '"eval" string: "\%s"\n', bfr);
try
eval(bfr);
writeByte(os, 0);
fprintf(1, 'Sent byte: \%d\n', 0);
flush(os);
catch
fprintf(1, 'EvaluationException: \%s\n', lasterr);
writeByte(os, -1);
fprintf(1, 'Sent byte: \%d\n', -1);
writeUTF(os, lasterr);
fprintf(1, 'Sent UTF: \%s\n', lasterr);
flush(os);
end
flush(os);
state = 0;
\%-------------------
\% 'send'
\%-------------------
elseif (state == strmatch('send', commands, 'exact'))
tmpname = sprintf('\%s.mat', tempname);
expr = sprintf('save(tmpname, ''\%s''', saveOption);
ok = 1;
for k=1:length(variables),
variable = variables{k};
if (exist(variable) ~= 1)
lasterr = sprintf('Variable ''\%s'' not found.', variable);
disp(lasterr);
ok = 0;
break;
end;
expr = sprintf('\%s, ''\%s''', expr, variable);
end;
expr = sprintf('\%s)', expr);
if (~ok)
writeInt(os, -1);
writeUTF(os, lasterr);
else
disp(expr);
eval(expr);
writeUTF(os, tmpname);
end
answer = readByte(is);
fprintf('answer=\%d\n', answer);
state = 0;
\%-------------------
\% 'send-remote'
\%-------------------
elseif (state == strmatch('send-remote', commands, 'exact'))
tmpname = sprintf('\%s.mat', tempname);
expr = sprintf('save(tmpname, ''\%s''', saveOption);
ok = 1;
for k=1:length(variables),
variable = variables{k};
if (exist(variable) ~= 1)
lasterr = sprintf('Variable ''\%s'' not found.', variable);
disp(lasterr);
ok = 0;
break;
end;
expr = sprintf('\%s, ''\%s''', expr, variable);
end;
expr = sprintf('\%s)', expr);
if (~ok)
writeInt(os, -1);
writeUTF(os, lasterr);
else
disp(expr);
eval(expr);
file = java.io.File(tmpname);
maxLength = length(file);
clear file;
writeInt(os, maxLength);
fprintf(1, 'Send int: \%d (maxLength)\n', maxLength);
fid = fopen(tmpname, 'r');
count = 1;
while (count ~= 0)
[bfr, count] = fread(fid, 65536, 'int8');
if (count > 0)
write(os, bfr);
\% fprintf(1, 'Wrote \%d byte(s).\n', length(bfr));
end;
end;
fclose(fid);
\% fprintf(1, 'Wrote!\n');
fprintf(1, 'Send buffer: \%d bytes.\n', maxLength);
delete(tmpname);
clear bfr, count, maxLength, fid, tmpname;
end
flush(os);
answer = readByte(is);
fprintf('answer=\%d\n', answer);
state = 0;
\%-------------------
\% 'receive-remote'
\%-------------------
elseif (state == strmatch('receive-remote', commands, 'exact'))
len = readInt(is);
fprintf(1, 'Will read MAT file structure of length: \%d bytes.\n', len);
reader = InputStreamByteWrapper(4096);
bfr = [];
count = 1;
while (len > 0 & count > 0)
count = reader.read(is, min(4096, len));
if (count > 0)
bfr = [bfr; reader.bfr(1:count)];
len = len - count;
end;
end;
clear reader count len;
tmpfile = sprintf('\%s.mat', tempname);
\% tmpfile = 'tmp2.mat';
\% disp(bfr');
\% disp(tmpfile);
fh = fopen(tmpfile, 'wb');
fwrite(fh, bfr, 'int8');
fclose(fh);
clear fh, bfr;
load(tmpfile);
delete(tmpfile);
clear tmpfile;
writeByte(os, 0);
state = 0;
\%-------------------
\% 'receive'
\%-------------------
elseif (state == strmatch('receive', commands, 'exact'))
filename = char(readUTF(is));
fprintf(1, 'Will read MAT file: "\%s"\n', filename);
load(filename);
clear filename;
writeByte(os, 0);
state = 0;
end
end
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\% Shutting down the MATLAB server
\% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fprintf(1, '-----------------------\n');
fprintf(1, 'MATLAB server shutdown!\n');
fprintf(1, '-----------------------\n');
writeByte(os, 0);
close(os);
close(is);
close(server);
\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%
\% HISTORY:
\% 2014-06-23 [v3.0.2]
\% o ROBUSTNESS: Variables 'lasterr' and 'variables' are now always
\% defined. Potential bug spotted by Steven Jaffe at Morgan Stanley.
\% o Added more progress/verbose output, e.g. current working directory.
\% 2014-01-21 [v2.2.0]
\% o BUG FIX: The MatlabServer.m script would incorrectly consider
\% Matlab v8 and above as Matlab v6. Thanks to Frank Stephen at NREL
\% for reporting on this and providing a patch.
\% 2013-07-11 [v1.3.5]
\% o Updated messages to use 'MATLAB' instead of 'Matlab'.
\% 2010-10-25 [v1.3.4]
\% o BUG FIX: The MatlabServer.m script incorrectly referred to the
\% InputStreamByteWrapper class as java.io.InputStreamByteWrapper.
\% Thanks Kenvor Cothey at GMO LCC for reporting on this.
\% 2010-08-28
\% o Now the MatlabServer script reports its version when started.
\% 2010-08-27
\% o BUG FIX: Now MatlabServer.m saves variables using the function form,
\% i.e. save(). This solves the problem of having single quotation marks
\% in the pathname. Thanks Michael Q. Fan at NC State University for
\% reporting this problem.
\% 2009-08-25
\% o BUG FIX: Started to get the error "Undefined function or method
\% 'ServerSocket' for input arguments of type 'double'.". It seems like
\% import java.net.* etc does not work. A workaround is to specify the
\% full path for all Java classes, e.g. java.net.ServerSocket.
\% Thanks Nicolas Stadler for reporting this issue.
\% 2006-12-28
\% o Extended the accepted range of ports from [1023,49151] to [1023,66535].
\% 2006-05-08
\% o BUG FIX: The error message string for reporting port out of range
\% was invalid and gave the error '... Line: 109 Column: 45 ")" expected,
\% "identifier" found.'. Thanks Alexander Nervedi for reporting this.
\% 2006-01-21
\% o Now an error is thrown if port number is out of (safe) range.
\% o Added option to specify the port number via the system environment
\% variable MATLABSERVER_PORT, after request by Wang Yu, Iowa State Univ.
\% 2005-03-08
\% o BUG FIX: substring() is not recognized by MATLAB v7. Using regexp()
\% which works in MATLAB 6.5 and 7. Workaround eval('try', 'catch').
\% Thanks Patrick Drechsler, University of Wuerzburg for the bug report.
\% 2005-02-24
\% o Now the dynamic Java classpath is set for MATLAB v7 or higher. This
\% will simplify life for MATLAB v7 users.
\% 2005-02-22
\% o Added javaaddpath() to include InputStreamByteWrapper.class.
\% Thanks Yichun Wei for feedback and great suggestions.
\% 2005-02-11
\% o If MATLAB v7 or higher is detected, all MAT structures are saved with
\% option '-V6' so readMat() in R.matlab can read them.
\% 2002-09-02 [or maybe a little bit earlier]
\% o Created.
\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%\%
}\emph{}
}
\section{InputStreamByteWrapper.(class|java) script}{
The Java class InputStreamByteWrapper is needed in order for MATLAB to
\emph{receive} \emph{data} via a data stream. \R sends data via a data
stream if, and only if, the connection was setup for "remote"
communication, that is, with argument \code{remote=TRUE}).
\preformatted{
import java.io.*;
/*********************************************************************
\% Compile from within MATLAB with:
\% !javac InputStreamByteWrapper.java
\% MATLAB example that reads a file using Java code and writes it
\% back to a temporary file using MATLAB code. Finally the contents
\% of the new file is displayed.
reader = InputStreamByteWrapper; \% Default buffer size is 4096 bytes.
in = java.io.FileInputStream('InputStreamByteWrapper.java');
bfr = [];
len = 1;
while (len > 0)
len = reader.read(in, 16); \% Read 16 bytes at the time (offset=0).
if (len > 0)
bfr = [bfr; reader.bfr(1:len)]; \% Add bytes to my MATLAB buffer.
end
end
close(in);
clear in, reader;
disp(bfr');
tmpfile = tempname;
fh = fopen(tmpfile, 'wb');
fwrite(fh, bfr, 'char');
fclose(fh);
type(tmpfile);
*********************************************************************/
public class InputStreamByteWrapper {
public static byte[] bfr = null;
public InputStreamByteWrapper(int capasity) {
bfr = new byte[capasity];
}
public InputStreamByteWrapper() {
this(4096);
}
public int read(InputStream in, int offset, int length) throws IOException {
return in.read(bfr, offset, length);
}
public int read(InputStream in, int length) throws IOException {
return read(in, 0, length);
}
public int read(InputStream in) throws IOException {
return in.read(bfr);
}
}
/*********************************************************************
HISTORY:
2013-07-11
o Updated comments to use 'MATLAB' instead of 'Matlab'.
2002-09-02 [or maybe a little bit earlier]
o Created.
*********************************************************************/
}\emph{}
}
\keyword{documentation}
|
## plot2.R -- line chart of Global Active Power for 1-2 Feb 2007.
## NOTE(review): hard-coded working directory is machine-specific; confirm
## before running on another machine.
setwd("C:/MyDataDir/Client/R-Code")
datafile <- "household_power_consumption.txt"
## The UCI household power dataset encodes missing values as "?". Without
## na.strings="?" the power columns are read as character and as.numeric()
## below emits "NAs introduced by coercion" warnings.
data <- read.table(datafile, header=TRUE, sep=";", na.strings="?",
                   stringsAsFactors=FALSE, dec=".")
## Keep only the two target days; Date is stored in d/m/Y format.
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
## Combine the Date and Time columns into timestamps for the x axis.
dateNtime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
GactivePower <- as.numeric(subSetData$Global_active_power)
## Render to a 480x480 PNG as required.
png("plot2.png", width=480, height=480)
plot(dateNtime, GactivePower, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
| /plot2.R | no_license | pgupta05/ExData_Plotting1 | R | false | false | 521 | r | setwd("C:/MyDataDir/Client/R-Code")
## plot2.R (second copy in this chunk; its setwd line is fused into the
## preceding metadata row). Same fix as the first copy: declare "?" as the
## NA marker so the power columns parse as numeric instead of character.
datafile <- "household_power_consumption.txt"
data <- read.table(datafile, header=TRUE, sep=";", na.strings="?",
                   stringsAsFactors=FALSE, dec=".")
## Keep only the two target days; Date is stored in d/m/Y format.
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
## Combine the Date and Time columns into timestamps for the x axis.
dateNtime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
GactivePower <- as.numeric(subSetData$Global_active_power)
## Render to a 480x480 PNG as required.
png("plot2.png", width=480, height=480)
plot(dateNtime, GactivePower, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
#' File extension-to-MIME mapping data frame
#'
#' @docType data
#' @export
structure(list(extension = c("pyc", "dwg", "ez", "aw", "arj",
"atom", "xml", "atomcat", "atomsvc", "mm", "mme", "hqx", "hqx",
"boo", "book", "ccxml", "cdf", "cdmia", "cdmic", "cdmid", "cdmio",
"cdmiq", "ccad", "dp", "cu", "csm", "davmount", "dbk", "drw",
"tsp", "dssc", "xdssc", "dxf", "es", "ecma", "js", "emma", "evy",
"epub", "xl", "xla", "xlb", "xlc", "xld", "xlk", "xll", "xlm",
"xls", "xlt", "xlv", "xlw", "exi", "pfr", "woff", "fif", "frl",
"spl", "gml", "tgz", "gpx", "vew", "gxf", "hlp", "hta", "stk",
"unv", "iges", "igs", "inf", "ink", "inkml", "acx", "ipfix",
"class", "jar", "class", "ser", "class", "js", "json", "jsonml",
"lha", "lostxml", "lzx", "bin", "hqx", "hqx", "cpt", "bin", "mads",
"mrc", "mrcx", "ma", "nb", "mb", "mathml", "mbd", "mbox", "mcd",
"mscml", "metalink", "meta4", "mets", "aps", "mods", "m21", "mp21",
"mp4", "m4p", "mp4s", "mdb", "one", "onetoc2", "onetmp", "onepkg",
"pot", "pps", "ppt", "ppz", "doc", "dot", "w6w", "wiz", "word",
"wri", "mxf", "mcp", "bin", "dms", "lrf", "mar", "so", "dist",
"distz", "pkg", "bpk", "dump", "elc", "a", "arc", "arj", "com",
"exe", "lha", "lhx", "lzh", "lzx", "o", "psd", "saveme", "uu",
"zoo", "class", "buffer", "deploy", "hqx", "obj", "lib", "zip",
"gz", "dmg", "iso", "oda", "opf", "ogx", "ogg", "axs", "omdoc",
"onetoc", "onetoc2", "onetmp", "onepkg", "oxps", "xer", "pdf",
"pgp", "key", "asc", "pgp", "sig", "prf", "p12", "crl", "p10",
"p7m", "p7c", "p7s", "p8", "ac", "cer", "crt", "crl", "pkipath",
"pki", "text", "pls", "ai", "eps", "ps", "ppt", "part", "prt",
"cww", "pskcxml", "rar", "rdf", "rif", "rnc", "rl", "rld", "rng",
"rs", "gbr", "mft", "roa", "rsd", "rss", "xml", "rtf", "rtx",
"sbml", "scq", "scs", "spq", "spp", "sdp", "sea", "set", "setpay",
"setreg", "shf", "stl", "smi", "smil", "smi", "smil", "sol",
"sdr", "rq", "srx", "gram", "grxml", "sru", "ssdl", "ssml", "step",
"stp", "ssm", "tei", "teicorpus", "tfi", "tsd", "tbk", "vda",
"plb", "psb", "pvb", "tcap", "pwn", "aso", "imp", "acu", "atc",
"acutc", "air", "fcdt", "fxp", "fxpl", "xdp", "xfdf", "ahead",
"azf", "azs", "azw", "acc", "ami", "apk", "cii", "fti", "atx",
"mpkg", "m3u8", "swi", "swi", "iota", "aep", "mpm", "bmi", "rep",
"cdxml", "mmd", "cdy", "cla", "rp9", "c4g", "c4d", "c4f", "c4p",
"c4u", "c11amc", "c11amz", "csp", "cdbcmsg", "cmc", "clkx", "clkk",
"clkp", "clkt", "clkw", "wbs", "pml", "ppd", "car", "pcurl",
"dart", "rdz", "uvf", "uvvf", "uvd", "uvvd", "uvt", "uvvt", "uvx",
"uvvx", "uvz", "uvvz", "fe_launch", "dna", "mlp", "dpg", "dfac",
"kpxx", "ait", "svc", "geo", "mag", "nml", "esf", "msf", "qam",
"slt", "ssf", "es3", "et3", "ez2", "ez3", "fdf", "mseed", "seed",
"dataless", "gph", "ftc", "fm", "frame", "maker", "book", "fnc",
"ltf", "fsc", "oas", "oa2", "oa3", "fg5", "bh2", "ddd", "xdw",
"xbd", "fzs", "txd", "ggb", "ggt", "gex", "gre", "gxt", "g2w",
"g3w", "gmx", "kml", "kmz", "gqf", "gqs", "gac", "ghf", "gim",
"grv", "gtm", "tpl", "vcg", "hal", "zmm", "hbci", "les", "hgl",
"hpg", "hpgl", "hpid", "hps", "jlt", "pcl", "pclxl", "sfd-hdstx",
"x3d", "mpy", "afp", "listafp", "list3820", "irm", "sc", "icc",
"icm", "igl", "ivp", "ivu", "igm", "xpw", "xpx", "i2g", "qbo",
"qfx", "rcprofile", "irp", "xpr", "fcs", "jam", "rms", "jisp",
"joda", "ktz", "ktr", "karbon", "chrt", "kfo", "flw", "kon",
"kpr", "kpt", "ksp", "kwd", "kwt", "htke", "kia", "kne", "knp",
"skp", "skd", "skt", "skm", "sse", "lasxml", "lbd", "lbe", "123",
"apr", "pre", "nsf", "org", "scm", "lwp", "portpkg", "mcd", "mc1",
"cdkey", "mwf", "mfm", "flo", "igx", "mif", "daf", "dis", "mbk",
"mqy", "msl", "plc", "txf", "mpn", "mpc", "xul", "cil", "cab",
"xls", "xlm", "xla", "xlc", "xlt", "xlb", "xll", "xlw", "xlam",
"xlam", "xlsb", "xlsb", "xlsm", "xlsm", "xltm", "xltm", "eot",
"chm", "ims", "lrm", "thmx", "msg", "sst", "pko", "cat", "stl",
"sst", "cat", "stl", "ppt", "pps", "pot", "ppa", "pwz", "ppam",
"ppam", "pptm", "potm", "pptm", "potm", "sldm", "sldm", "ppsm",
"ppsm", "potm", "potm", "mpp", "mpt", "docm", "docm", "dotm",
"dotm", "wps", "wks", "wcm", "wdb", "wpl", "xps", "mseq", "mus",
"msty", "taglet", "nlu", "ntf", "nitf", "nnd", "nns", "nnw",
"ncm", "ngdat", "n-gage", "rpst", "rpss", "rng", "edm", "edx",
"ext", "edm", "edx", "ext", "odc", "otc", "odb", "odf", "odft",
"odg", "otg", "odi", "oti", "odp", "otp", "ods", "ots", "odt",
"odm", "otm", "ott", "oth", "xo", "dd2", "oxt", "pptx", "sldx",
"ppsx", "potx", "xlsx", "xltx", "docx", "dotx", "mgp", "dp",
"esa", "pdb", "pqa", "oprc", "paw", "str", "ei6", "efif", "wg",
"plf", "pbd", "box", "mgz", "qps", "ptid", "qxd", "qxt", "qwd",
"qwt", "qxl", "qxb", "bed", "mxl", "musicxml", "cryptonote",
"cod", "rm", "rmvb", "rnx", "link66", "st", "see", "sema", "semd",
"semf", "ifm", "itp", "iif", "ipk", "twd", "twds", "mmf", "teacher",
"sdkm", "sdkd", "dxp", "sfs", "sdc", "sda", "sdd", "sdp", "smf",
"sdw", "vor", "sgl", "smzip", "sm", "sxc", "stc", "sxd", "std",
"sxi", "sti", "sxm", "sxw", "sxg", "stw", "sus", "susp", "svd",
"sis", "sisx", "xsm", "bdm", "xdm", "tao", "pcap", "cap", "dmp",
"tmo", "tpt", "mxs", "tra", "ufd", "ufdl", "utz", "umj", "unityweb",
"uoml", "vcx", "vsd", "vst", "vss", "vsw", "vis", "vsf", "sic",
"slc", "wbxml", "wmlc", "wmlsc", "wtb", "nbp", "wpd", "wqd",
"stf", "xar", "web", "xfdl", "hvd", "hvs", "hvp", "osf", "osfpvg",
"saf", "spf", "cmp", "zir", "zirz", "zaz", "vmd", "vmf", "vxml",
"wgt", "hlp", "wp", "wp5", "wp6", "wpd", "wp5", "w60", "wp5",
"w61", "wsdl", "wspolicy", "wk1", "wk", "7z", "abw", "ace", "aim",
"dmg", "aab", "x32", "u32", "vox", "aam", "aas", "bcpio", "bin",
"hqx", "torrent", "blb", "blorb", "bsh", "sh", "shar", "elc",
"elc", "bz", "bz2", "boz", "cbr", "cba", "cbt", "cbz", "cb7",
"cdf", "vcd", "cfs", "chat", "cha", "pgn", "chm", "crx", "ras",
"cco", "cpt", "z", "gz", "tgz", "z", "zip", "nsc", "cpio", "cpt",
"csh", "deb", "udeb", "deepv", "dgc", "dir", "dcr", "dxr", "cst",
"cct", "cxt", "w3d", "fgd", "swa", "dms", "wad", "ncx", "dtb",
"res", "dvi", "elc", "env", "evy", "es", "eva", "xla", "xlb",
"xlc", "xld", "xlk", "xll", "xlm", "xls", "xlt", "xlv", "xlw",
"flac", "pfa", "pfb", "gsf", "pcf", "pcf.Z", "bdf", "gsf", "psf",
"otf", "pcf", "snf", "ttf", "ttc", "pfa", "pfb", "pfm", "afm",
"woff", "mif", "arc", "pre", "spl", "gca", "ulx", "gnumeric",
"sgf", "gramps", "gcf", "gsp", "gss", "gtar", "tgz", "taz", "gz",
"gzip", "tgz", "hdf", "help", "hlp", "imap", "phtml", "pht",
"php", "phps", "php3", "php3p", "php4", "ica", "ima", "install",
"ins", "isp", "ins", "iv", "ip", "iii", "iso", "jar", "class",
"jcm", "jnlp", "ser", "class", "js", "chrt", "kil", "skd", "skm",
"skp", "skt", "kpr", "kpt", "ksh", "ksp", "kwd", "kwt", "latex",
"ltx", "lha", "lsp", "ivy", "wq1", "scm", "luac", "lzh", "lzh",
"lha", "lzx", "hqx", "bin", "mc$", "frm", "maker", "frame", "fm",
"fb", "book", "fbdoc", "mcd", "mm", "mid", "midi", "mie", "mif",
"nix", "prc", "mobi", "m3u8", "asx", "application", "lnk", "wmd",
"wmz", "xbap", "mdb", "obd", "crd", "clp", "com", "exe", "bat",
"dll", "exe", "dll", "com", "bat", "msi", "xla", "xls", "xlw",
"msi", "mvb", "m13", "m14", "wmf", "wmz", "emf", "emz", "mny",
"ppt", "pub", "scd", "trm", "wri", "ani", "nvd", "map", "stl",
"nc", "cdf", "pkg", "aos", "pac", "nwc", "nzb", "o", "omc", "omcd",
"omcr", "oza", "pm4", "pm5", "pcl", "pma", "pmc", "pml", "pmr",
"pmw", "plx", "p10", "p12", "pfx", "p7b", "spc", "p7r", "crl",
"p7c", "p7m", "p7a", "p7s", "css", "pnm", "mpc", "mpt", "mpv",
"mpx", "pyc", "pyo", "wb1", "qtl", "rar", "rpm", "ris", "rpm",
"rtf", "sdp", "sea", "sl", "sh", "shar", "sh", "swf", "swfl",
"xap", "sit", "spr", "sprite", "sql", "sit", "sitx", "srt", "sv4cpio",
"sv4crc", "t3", "gam", "tar", "sbk", "tbk", "tcl", "tex", "gf",
"pk", "tfm", "texinfo", "texi", "obj", "~", "%", "bak", "old",
"sik", "roff", "t", "tr", "man", "me", "ms", "avi", "ustar",
"vsd", "vst", "vsw", "mzz", "xpix", "vrml", "src", "wsrc", "webapp",
"wz", "hlp", "wtk", "svr", "wrl", "wpd", "wri", "der", "cer",
"crt", "crt", "xcf", "fig", "xlf", "xpi", "xz", "zip", "z1",
"z2", "z3", "z4", "z5", "z6", "z7", "z8", "xaml", "xdf", "xenc",
"xhtml", "xht", "xml", "xsl", "xpdl", "dtd", "xop", "xpl", "xslt",
"xspf", "mxml", "xhvml", "xvml", "xvm", "yang", "yin", "pko",
"zip", "adp", "aif", "aifc", "aiff", "au", "snd", "flac", "it",
"funk", "my", "pfunk", "pfunk", "rmi", "mid", "mid", "midi",
"kar", "rmi", "mod", "mp4a", "m4a", "mpga", "mp2", "mp2a", "mp3",
"m2a", "mpa", "mpg", "m3a", "mpega", "m4a", "mp3", "m3u", "la",
"lma", "oga", "ogg", "spx", "sid", "s3m", "sil", "tsi", "tsp",
"uva", "uvva", "eol", "dra", "dts", "dtshd", "lvp", "pya", "ecelp4800",
"ecelp7470", "ecelp9600", "qcp", "rip", "voc", "vox", "wav",
"weba", "aac", "snd", "aif", "aiff", "aifc", "au", "caf", "flac",
"gsd", "gsm", "jam", "lam", "mka", "mid", "midi", "mid", "midi",
"mod", "mp2", "mp3", "m3u", "m3u", "wax", "wma", "la", "lma",
"ram", "ra", "rm", "rmm", "rmp", "rmp", "ra", "rpm", "sid", "ra",
"pls", "sd2", "vqf", "vqe", "vql", "mjf", "voc", "wav", "xm",
"cdx", "cif", "cmdf", "cml", "csml", "pdb", "xyz", "xyz", "dwf",
"dwf", "otf", "ivr", "bmp", "bm", "cgm", "cod", "ras", "rast",
"fif", "flo", "turbot", "g3", "gif", "ief", "iefs", "jpeg", "jpg",
"jfif", "jfif-tbnl", "jpe", "jut", "ktx", "nap", "naplps", "pcx",
"pic", "pict", "jfif", "jfif", "jpe", "jpeg", "jpg", "png", "x-png",
"btif", "sgi", "svg", "svgz", "tiff", "tif", "mcf", "psd", "uvi",
"uvvi", "uvg", "uvvg", "djvu", "djv", "sub", "dwg", "dxf", "svf",
"dxf", "fbs", "fpx", "fpix", "fst", "mmr", "rlc", "mdi", "wdp",
"npx", "fpx", "rf", "rp", "wbmp", "xif", "webp", "3ds", "ras",
"ras", "cmx", "cdr", "pat", "cdt", "cpt", "dwg", "dxf", "svf",
"fh", "fhc", "fh4", "fh5", "fh7", "ico", "art", "jng", "jps",
"sid", "bmp", "nif", "niff", "pcx", "psd", "pic", "pct", "pnm",
"pbm", "pgm", "pgm", "ppm", "qif", "qti", "qtif", "rgb", "tga",
"tif", "tiff", "bmp", "xbm", "xpm", "xbm", "xpm", "pm", "xwd",
"xwd", "xbm", "xpm", "eml", "mht", "mhtml", "mime", "nws", "igs",
"iges", "msh", "mesh", "silo", "dae", "dwf", "gdl", "gtw", "mts",
"vtu", "wrl", "vrml", "wrz", "pov", "x3db", "x3dbz", "x3dv",
"x3dvz", "x3d", "x3dz", "gzip", "ustar", "zip", "mid", "midi",
"kar", "pvu", "asp", "appcache", "manifest", "ics", "ifb", "icz",
"csv", "css", "csv", "js", "event-stream", "323", "html", "acgi",
"htm", "htmls", "htx", "shtml", "stm", "uls", "js", "mml", "mcf",
"n3", "pas", "txt", "text", "conf", "def", "list", "log", "c",
"c++", "cc", "com", "cxx", "f", "f90", "for", "g", "h", "hh",
"idc", "jav", "java", "lst", "m", "mar", "pl", "sdml", "bas",
"in", "asc", "diff", "pot", "el", "ksh", "par", "dsc", "rtx",
"rt", "rtf", "rtf", "wsc", "sct", "wsc", "sgml", "sgm", "tsv",
"tm", "ts", "t", "tr", "roff", "man", "me", "ms", "ttl", "uri",
"uris", "uni", "unis", "urls", "vcard", "abc", "curl", "dcurl",
"mcurl", "scurl", "sub", "fly", "flx", "gv", "3dml", "spot",
"rt", "jad", "si", "sl", "wml", "wmls", "vtt", "htt", "s", "asm",
"aip", "c", "cc", "cxx", "cpp", "h", "hh", "dic", "h++", "hpp",
"hxx", "hh", "c++", "cpp", "cxx", "cc", "h", "htc", "csh", "c",
"f", "for", "f77", "f90", "h", "hh", "java", "java", "jav", "lsx",
"lua", "m", "markdown", "md", "mkd", "moc", "nfo", "opml", "p",
"pas", "gcd", "pl", "pm", "py", "hlb", "csh", "el", "scm", "ksh",
"lsp", "pl", "pm", "py", "rexx", "scm", "sh", "tcl", "tcsh",
"zsh", "shtml", "ssi", "etx", "sfv", "sgm", "sgml", "sh", "spc",
"talk", "tcl", "tk", "tex", "ltx", "sty", "cls", "uil", "uu",
"uue", "vcs", "vcf", "xml", "3gp", "3g2", "ts", "afl", "avi",
"avs", "dl", "flc", "fli", "flc", "fli", "gl", "h261", "h263",
"h264", "jpgv", "jpm", "jpgm", "mj2", "mjp2", "mp4", "mp4v",
"mpg4", "mpeg", "mpg", "mpe", "m1v", "m2v", "mp2", "mp3", "mpa",
"mpv2", "avi", "ogv", "qt", "moov", "mov", "vdo", "viv", "vivo",
"uvh", "uvvh", "uvm", "uvvm", "uvp", "uvvp", "uvs", "uvvs", "uvv",
"uvvv", "dvb", "fvt", "mxu", "m4u", "pyv", "rv", "uvu", "uvvu",
"viv", "vivo", "vos", "webm", "xdr", "xsr", "fmf", "dl", "dif",
"dv", "f4v", "fli", "flv", "gl", "isu", "lsf", "lsx", "m4v",
"mkv", "mk3d", "mks", "mng", "mjpg", "mp2", "mp3", "mp2", "asf",
"asx", "asr", "asx", "vob", "wm", "wmv", "wmx", "wvx", "avi",
"qtc", "scm", "movie", "mv", "smv", "wmf", "mime", "ice", "mid",
"midi", "3dm", "3dmf", "qd3", "qd3d", "svr", "vrml", "wrl", "wrz",
"flr", "xaf", "xof", "vrm", "vrt", "xgz", "xmz", "ma nb mb",
"doc dot", "bin dms lha lrf lzh so iso dmg dist distz pkg bpk dump elc deploy",
"onetoc onetoc2 onetmp onepkg", "asc sig", "p7m p7c", "ai eps ps",
"smi smil", "atc acutc", "c4g c4d c4f c4p c4u", "es3 et3", "seed dataless",
"fm frame maker book", "gex gre", "gqf gqs", "afp listafp list3820",
"icc icm", "xpw xpx", "ktz ktr", "kpr kpt", "kwd kwt", "kne knp",
"skp skd skt skm", "xls xlm xla xlc xlt xlw", "ppt pps pot",
"mpp mpt", "wps wks wcm wdb", "pdb pqa oprc", "qxd qxt qwd qwt qxl qxb",
"twd twds", "sdkm sdkd", "sus susp", "sis sisx", "ufd ufdl",
"vsd vst vss vsw", "zir zirz", "aab x32 u32 vox", "bz2 boz",
"deb udeb", "dir dcr dxr cst cct cxt w3d fgd swa", "ttf ttc",
"pfa pfb pfm afm", "prc mobi", "exe dll com bat msi", "mvb m13 m14",
"nc cdf", "p12 pfx", "p7b spc", "texinfo texi", "der crt", "xhtml xht",
"xml xsl", "mxml xhvml xvml xvm", "au snd", "mid midi kar rmi",
"mpga mp2 mp2a mp3 m2a m3a", "oga ogg spx", "aif aiff aifc",
"ram ra", "jpeg jpg jpe", "svg svgz", "tiff tif", "djvu djv",
"fh fhc fh4 fh5 fh7", "pic pct", "eml mime", "igs iges", "msh mesh silo",
"wrl vrml", "ics ifb", "html htm", "txt text conf def list log in",
"sgml sgm", "t tr roff man me ms", "uri uris urls", "s asm",
"c cc cxx cpp h hh dic", "f for f77 f90", "p pas", "jpm jpgm",
"mj2 mjp2", "mp4 mp4v mpg4", "mpeg mpg mpe m1v m2v", "qt mov",
"mxu m4u", "asf asx"), mime_type = c("application/x-bytecode.python",
"application/acad", "application/andrew-inset", "application/applixware",
"application/arj", "application/atom+xml", "application/atom+xml",
"application/atomcat+xml", "application/atomsvc+xml", "application/base64",
"application/base64", "application/binhex", "application/binhex4",
"application/book", "application/book", "application/ccxml+xml",
"application/cdf", "application/cdmi-capability", "application/cdmi-container",
"application/cdmi-domain", "application/cdmi-object", "application/cdmi-queue",
"application/clariscad", "application/commonground", "application/cu-seeme",
"application/cu-seeme", "application/davmount+xml", "application/docbook+xml",
"application/drafting", "application/dsptype", "application/dssc+der",
"application/dssc+xml", "application/dxf", "application/ecmascript",
"application/ecmascript", "application/ecmascript", "application/emma+xml",
"application/envoy", "application/epub+zip", "application/excel",
"application/excel", "application/excel", "application/excel",
"application/excel", "application/excel", "application/excel",
"application/excel", "application/excel", "application/excel",
"application/excel", "application/excel", "application/exi",
"application/font-tdpfr", "application/font-woff", "application/fractals",
"application/freeloader", "application/futuresplash", "application/gml+xml",
"application/gnutar", "application/gpx+xml", "application/groupwise",
"application/gxf", "application/hlp", "application/hta", "application/hyperstudio",
"application/i-deas", "application/iges", "application/iges",
"application/inf", "application/inkml+xml", "application/inkml+xml",
"application/internet-property-stream", "application/ipfix",
"application/java", "application/java-archive", "application/java-byte-code",
"application/java-serialized-object", "application/java-vm",
"application/javascript", "application/json", "application/jsonml+json",
"application/lha", "application/lost+xml", "application/lzx",
"application/mac-binary", "application/mac-binhex", "application/mac-binhex40",
"application/mac-compactpro", "application/macbinary", "application/mads+xml",
"application/marc", "application/marcxml+xml", "application/mathematica",
"application/mathematica", "application/mathematica", "application/mathml+xml",
"application/mbedlet", "application/mbox", "application/mcad",
"application/mediaservercontrol+xml", "application/metalink+xml",
"application/metalink4+xml", "application/mets+xml", "application/mime",
"application/mods+xml", "application/mp21", "application/mp21",
"application/mp4", "application/mp4", "application/mp4", "application/msaccess",
"application/msonenote", "application/msonenote", "application/msonenote",
"application/msonenote", "application/mspowerpoint", "application/mspowerpoint",
"application/mspowerpoint", "application/mspowerpoint", "application/msword",
"application/msword", "application/msword", "application/msword",
"application/msword", "application/mswrite", "application/mxf",
"application/netmc", "application/octet-stream", "application/octet-stream",
"application/octet-stream", "application/octet-stream", "application/octet-stream",
"application/octet-stream", "application/octet-stream", "application/octet-stream",
"application/octet-stream", "application/octet-stream", "application/octet-stream",
"application/octet-stream", "application/octet-stream", "application/octet-stream",
"application/octet-stream", "application/octet-stream", "application/octet-stream",
"application/octet-stream", "application/octet-stream", "application/octet-stream",
"application/octet-stream", "application/octet-stream", "application/octet-stream",
"application/octet-stream", "application/octet-stream", "application/octet-stream",
"application/octet-stream", "application/octet-stream", "application/octet-stream",
"application/octet-stream", "application/octet-stream", "application/octet-stream",
"application/octet-stream", "application/octet-stream", "application/octet-stream",
"application/oda", "application/oebps-package+xml", "application/ogg",
"application/ogg", "application/olescript", "application/omdoc+xml",
"application/onenote", "application/onenote", "application/onenote",
"application/onenote", "application/oxps", "application/patch-ops-error+xml",
"application/pdf", "application/pgp-encrypted", "application/pgp-keys",
"application/pgp-signature", "application/pgp-signature", "application/pgp-signature",
"application/pics-rules", "application/pkcs-12", "application/pkcs-crl",
"application/pkcs10", "application/pkcs7-mime", "application/pkcs7-mime",
"application/pkcs7-signature", "application/pkcs8", "application/pkix-attr-cert",
"application/pkix-cert", "application/pkix-cert", "application/pkix-crl",
"application/pkix-pkipath", "application/pkixcmp", "application/plain",
"application/pls+xml", "application/postscript", "application/postscript",
"application/postscript", "application/powerpoint", "application/pro_eng",
"application/pro_eng", "application/prs.cww", "application/pskc+xml",
"application/rar", "application/rdf+xml", "application/reginfo+xml",
"application/relax-ng-compact-syntax", "application/resource-lists+xml",
"application/resource-lists-diff+xml", "application/ringing-tones",
"application/rls-services+xml", "application/rpki-ghostbusters",
"application/rpki-manifest", "application/rpki-roa", "application/rsd+xml",
"application/rss+xml", "application/rss+xml", "application/rtf",
"application/rtf", "application/sbml+xml", "application/scvp-cv-request",
"application/scvp-cv-response", "application/scvp-vp-request",
"application/scvp-vp-response", "application/sdp", "application/sea",
"application/set", "application/set-payment-initiation", "application/set-registration-initiation",
"application/shf+xml", "application/sla", "application/smil",
"application/smil", "application/smil+xml", "application/smil+xml",
"application/solids", "application/sounder", "application/sparql-query",
"application/sparql-results+xml", "application/srgs", "application/srgs+xml",
"application/sru+xml", "application/ssdl+xml", "application/ssml+xml",
"application/step", "application/step", "application/streamingmedia",
"application/tei+xml", "application/tei+xml", "application/thraud+xml",
"application/timestamped-data", "application/toolbook", "application/vda",
"application/vnd.3gpp.pic-bw-large", "application/vnd.3gpp.pic-bw-small",
"application/vnd.3gpp.pic-bw-var", "application/vnd.3gpp2.tcap",
"application/vnd.3m.post-it-notes", "application/vnd.accpac.simply.aso",
"application/vnd.accpac.simply.imp", "application/vnd.acucobol",
"application/vnd.acucorp", "application/vnd.acucorp", "application/vnd.adobe.air-application-installer-package+zip",
"application/vnd.adobe.formscentral.fcdt", "application/vnd.adobe.fxp",
"application/vnd.adobe.fxp", "application/vnd.adobe.xdp+xml",
"application/vnd.adobe.xfdf", "application/vnd.ahead.space",
"application/vnd.airzip.filesecure.azf", "application/vnd.airzip.filesecure.azs",
"application/vnd.amazon.ebook", "application/vnd.americandynamics.acc",
"application/vnd.amiga.ami", "application/vnd.android.package-archive",
"application/vnd.anser-web-certificate-issue-initiation", "application/vnd.anser-web-funds-transfer-initiation",
"application/vnd.antix.game-component", "application/vnd.apple.installer+xml",
"application/vnd.apple.mpegurl", "application/vnd.arastra.swi",
"application/vnd.aristanetworks.swi", "application/vnd.astraea-software.iota",
"application/vnd.audiograph", "application/vnd.blueice.multipass",
"application/vnd.bmi", "application/vnd.businessobjects", "application/vnd.chemdraw+xml",
"application/vnd.chipnuts.karaoke-mmd", "application/vnd.cinderella",
"application/vnd.claymore", "application/vnd.cloanto.rp9", "application/vnd.clonk.c4group",
"application/vnd.clonk.c4group", "application/vnd.clonk.c4group",
"application/vnd.clonk.c4group", "application/vnd.clonk.c4group",
"application/vnd.cluetrust.cartomobile-config", "application/vnd.cluetrust.cartomobile-config-pkg",
"application/vnd.commonspace", "application/vnd.contact.cmsg",
"application/vnd.cosmocaller", "application/vnd.crick.clicker",
"application/vnd.crick.clicker.keyboard", "application/vnd.crick.clicker.palette",
"application/vnd.crick.clicker.template", "application/vnd.crick.clicker.wordbank",
"application/vnd.criticaltools.wbs+xml", "application/vnd.ctc-posml",
"application/vnd.cups-ppd", "application/vnd.curl.car", "application/vnd.curl.pcurl",
"application/vnd.dart", "application/vnd.data-vision.rdz", "application/vnd.dece.data",
"application/vnd.dece.data", "application/vnd.dece.data", "application/vnd.dece.data",
"application/vnd.dece.ttml+xml", "application/vnd.dece.ttml+xml",
"application/vnd.dece.unspecified", "application/vnd.dece.unspecified",
"application/vnd.dece.zip", "application/vnd.dece.zip", "application/vnd.denovo.fcselayout-link",
"application/vnd.dna", "application/vnd.dolby.mlp", "application/vnd.dpgraph",
"application/vnd.dreamfactory", "application/vnd.ds-keypoint",
"application/vnd.dvb.ait", "application/vnd.dvb.service", "application/vnd.dynageo",
"application/vnd.ecowin.chart", "application/vnd.enliven", "application/vnd.epson.esf",
"application/vnd.epson.msf", "application/vnd.epson.quickanime",
"application/vnd.epson.salt", "application/vnd.epson.ssf", "application/vnd.eszigno3+xml",
"application/vnd.eszigno3+xml", "application/vnd.ezpix-album",
"application/vnd.ezpix-package", "application/vnd.fdf", "application/vnd.fdsn.mseed",
"application/vnd.fdsn.seed", "application/vnd.fdsn.seed", "application/vnd.flographit",
"application/vnd.fluxtime.clip", "application/vnd.framemaker",
"application/vnd.framemaker", "application/vnd.framemaker", "application/vnd.framemaker",
"application/vnd.frogans.fnc", "application/vnd.frogans.ltf",
"application/vnd.fsc.weblaunch", "application/vnd.fujitsu.oasys",
"application/vnd.fujitsu.oasys2", "application/vnd.fujitsu.oasys3",
"application/vnd.fujitsu.oasysgp", "application/vnd.fujitsu.oasysprs",
"application/vnd.fujixerox.ddd", "application/vnd.fujixerox.docuworks",
"application/vnd.fujixerox.docuworks.binder", "application/vnd.fuzzysheet",
"application/vnd.genomatix.tuxedo", "application/vnd.geogebra.file",
"application/vnd.geogebra.tool", "application/vnd.geometry-explorer",
"application/vnd.geometry-explorer", "application/vnd.geonext",
"application/vnd.geoplan", "application/vnd.geospace", "application/vnd.gmx",
"application/vnd.google-earth.kml+xml", "application/vnd.google-earth.kmz",
"application/vnd.grafeq", "application/vnd.grafeq", "application/vnd.groove-account",
"application/vnd.groove-help", "application/vnd.groove-identity-message",
"application/vnd.groove-injector", "application/vnd.groove-tool-message",
"application/vnd.groove-tool-template", "application/vnd.groove-vcard",
"application/vnd.hal+xml", "application/vnd.handheld-entertainment+xml",
"application/vnd.hbci", "application/vnd.hhe.lesson-player",
"application/vnd.hp-hpgl", "application/vnd.hp-hpgl", "application/vnd.hp-hpgl",
"application/vnd.hp-hpid", "application/vnd.hp-hps", "application/vnd.hp-jlyt",
"application/vnd.hp-pcl", "application/vnd.hp-pclxl", "application/vnd.hydrostatix.sof-data",
"application/vnd.hzn-3d-crossword", "application/vnd.ibm.minipay",
"application/vnd.ibm.modcap", "application/vnd.ibm.modcap", "application/vnd.ibm.modcap",
"application/vnd.ibm.rights-management", "application/vnd.ibm.secure-container",
"application/vnd.iccprofile", "application/vnd.iccprofile", "application/vnd.igloader",
"application/vnd.immervision-ivp", "application/vnd.immervision-ivu",
"application/vnd.insors.igm", "application/vnd.intercon.formnet",
"application/vnd.intercon.formnet", "application/vnd.intergeo",
"application/vnd.intu.qbo", "application/vnd.intu.qfx", "application/vnd.ipunplugged.rcprofile",
"application/vnd.irepository.package+xml", "application/vnd.is-xpr",
"application/vnd.isac.fcs", "application/vnd.jam", "application/vnd.jcp.javame.midlet-rms",
"application/vnd.jisp", "application/vnd.joost.joda-archive",
"application/vnd.kahootz", "application/vnd.kahootz", "application/vnd.kde.karbon",
"application/vnd.kde.kchart", "application/vnd.kde.kformula",
"application/vnd.kde.kivio", "application/vnd.kde.kontour", "application/vnd.kde.kpresenter",
"application/vnd.kde.kpresenter", "application/vnd.kde.kspread",
"application/vnd.kde.kword", "application/vnd.kde.kword", "application/vnd.kenameaapp",
"application/vnd.kidspiration", "application/vnd.kinar", "application/vnd.kinar",
"application/vnd.koan", "application/vnd.koan", "application/vnd.koan",
"application/vnd.koan", "application/vnd.kodak-descriptor", "application/vnd.las.las+xml",
"application/vnd.llamagraphics.life-balance.desktop", "application/vnd.llamagraphics.life-balance.exchange+xml",
"application/vnd.lotus-1-2-3", "application/vnd.lotus-approach",
"application/vnd.lotus-freelance", "application/vnd.lotus-notes",
"application/vnd.lotus-organizer", "application/vnd.lotus-screencam",
"application/vnd.lotus-wordpro", "application/vnd.macports.portpkg",
"application/vnd.mcd", "application/vnd.medcalcdata", "application/vnd.mediastation.cdkey",
"application/vnd.mfer", "application/vnd.mfmp", "application/vnd.micrografx.flo",
"application/vnd.micrografx.igx", "application/vnd.mif", "application/vnd.mobius.daf",
"application/vnd.mobius.dis", "application/vnd.mobius.mbk", "application/vnd.mobius.mqy",
"application/vnd.mobius.msl", "application/vnd.mobius.plc", "application/vnd.mobius.txf",
"application/vnd.mophun.application", "application/vnd.mophun.certificate",
"application/vnd.mozilla.xul+xml", "application/vnd.ms-artgalry",
"application/vnd.ms-cab-compressed", "application/vnd.ms-excel",
"application/vnd.ms-excel", "application/vnd.ms-excel", "application/vnd.ms-excel",
"application/vnd.ms-excel", "application/vnd.ms-excel", "application/vnd.ms-excel",
"application/vnd.ms-excel", "application/vnd.ms-excel.addin.macroEnabled.12",
"application/vnd.ms-excel.addin.macroenabled.12", "application/vnd.ms-excel.sheet.binary.macroEnabled.12",
"application/vnd.ms-excel.sheet.binary.macroenabled.12", "application/vnd.ms-excel.sheet.macroEnabled.12",
"application/vnd.ms-excel.sheet.macroenabled.12", "application/vnd.ms-excel.template.macroEnabled.12",
"application/vnd.ms-excel.template.macroenabled.12", "application/vnd.ms-fontobject",
"application/vnd.ms-htmlhelp", "application/vnd.ms-ims", "application/vnd.ms-lrm",
"application/vnd.ms-officetheme", "application/vnd.ms-outlook",
"application/vnd.ms-pki.certstore", "application/vnd.ms-pki.pko",
"application/vnd.ms-pki.seccat", "application/vnd.ms-pki.stl",
"application/vnd.ms-pkicertstore", "application/vnd.ms-pkiseccat",
"application/vnd.ms-pkistl", "application/vnd.ms-powerpoint",
"application/vnd.ms-powerpoint", "application/vnd.ms-powerpoint",
"application/vnd.ms-powerpoint", "application/vnd.ms-powerpoint",
"application/vnd.ms-powerpoint.addin.macroEnabled.12", "application/vnd.ms-powerpoint.addin.macroenabled.12",
"application/vnd.ms-powerpoint.presentation.macroEnabled.12",
"application/vnd.ms-powerpoint.presentation.macroEnabled.12",
"application/vnd.ms-powerpoint.presentation.macroenabled.12",
"application/vnd.ms-powerpoint.presentation.macroenabled.12",
"application/vnd.ms-powerpoint.slide.macroEnabled.12", "application/vnd.ms-powerpoint.slide.macroenabled.12",
"application/vnd.ms-powerpoint.slideshow.macroEnabled.12", "application/vnd.ms-powerpoint.slideshow.macroenabled.12",
"application/vnd.ms-powerpoint.template.macroEnabled.12", "application/vnd.ms-powerpoint.template.macroenabled.12",
"application/vnd.ms-project", "application/vnd.ms-project", "application/vnd.ms-word.document.macroEnabled.12",
"application/vnd.ms-word.document.macroenabled.12", "application/vnd.ms-word.template.macroEnabled.12",
"application/vnd.ms-word.template.macroenabled.12", "application/vnd.ms-works",
"application/vnd.ms-works", "application/vnd.ms-works", "application/vnd.ms-works",
"application/vnd.ms-wpl", "application/vnd.ms-xpsdocument", "application/vnd.mseq",
"application/vnd.musician", "application/vnd.muvee.style", "application/vnd.mynfc",
"application/vnd.neurolanguage.nlu", "application/vnd.nitf",
"application/vnd.nitf", "application/vnd.noblenet-directory",
"application/vnd.noblenet-sealer", "application/vnd.noblenet-web",
"application/vnd.nokia.configuration-message", "application/vnd.nokia.n-gage.data",
"application/vnd.nokia.n-gage.symbian.install", "application/vnd.nokia.radio-preset",
"application/vnd.nokia.radio-presets", "application/vnd.nokia.ringing-tone",
"application/vnd.novadigm.EDM", "application/vnd.novadigm.EDX",
"application/vnd.novadigm.EXT", "application/vnd.novadigm.edm",
"application/vnd.novadigm.edx", "application/vnd.novadigm.ext",
"application/vnd.oasis.opendocument.chart", "application/vnd.oasis.opendocument.chart-template",
"application/vnd.oasis.opendocument.database", "application/vnd.oasis.opendocument.formula",
"application/vnd.oasis.opendocument.formula-template", "application/vnd.oasis.opendocument.graphics",
"application/vnd.oasis.opendocument.graphics-template", "application/vnd.oasis.opendocument.image",
"application/vnd.oasis.opendocument.image-template", "application/vnd.oasis.opendocument.presentation",
"application/vnd.oasis.opendocument.presentation-template", "application/vnd.oasis.opendocument.spreadsheet",
"application/vnd.oasis.opendocument.spreadsheet-template", "application/vnd.oasis.opendocument.text",
"application/vnd.oasis.opendocument.text-master", "application/vnd.oasis.opendocument.text-master",
"application/vnd.oasis.opendocument.text-template", "application/vnd.oasis.opendocument.text-web",
"application/vnd.olpc-sugar", "application/vnd.oma.dd2+xml",
"application/vnd.openofficeorg.extension", "application/vnd.openxmlformats-officedocument.presentationml.presentation",
"application/vnd.openxmlformats-officedocument.presentationml.slide",
"application/vnd.openxmlformats-officedocument.presentationml.slideshow",
"application/vnd.openxmlformats-officedocument.presentationml.template",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"application/vnd.openxmlformats-officedocument.spreadsheetml.template",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"application/vnd.openxmlformats-officedocument.wordprocessingml.template",
"application/vnd.osgeo.mapguide.package", "application/vnd.osgi.dp",
"application/vnd.osgi.subsystem", "application/vnd.palm", "application/vnd.palm",
"application/vnd.palm", "application/vnd.pawaafile", "application/vnd.pg.format",
"application/vnd.pg.osasli", "application/vnd.picsel", "application/vnd.pmi.widget",
"application/vnd.pocketlearn", "application/vnd.powerbuilder6",
"application/vnd.previewsystems.box", "application/vnd.proteus.magazine",
"application/vnd.publishare-delta-tree", "application/vnd.pvi.ptid1",
"application/vnd.quark.quarkxpress", "application/vnd.quark.quarkxpress",
"application/vnd.quark.quarkxpress", "application/vnd.quark.quarkxpress",
"application/vnd.quark.quarkxpress", "application/vnd.quark.quarkxpress",
"application/vnd.realvnc.bed", "application/vnd.recordare.musicxml",
"application/vnd.recordare.musicxml+xml", "application/vnd.rig.cryptonote",
"application/vnd.rim.cod", "application/vnd.rn-realmedia", "application/vnd.rn-realmedia-vbr",
"application/vnd.rn-realplayer", "application/vnd.route66.link66+xml",
"application/vnd.sailingtracker.track", "application/vnd.seemail",
"application/vnd.sema", "application/vnd.semd", "application/vnd.semf",
"application/vnd.shana.informed.formdata", "application/vnd.shana.informed.formtemplate",
"application/vnd.shana.informed.interchange", "application/vnd.shana.informed.package",
"application/vnd.simtech-mindmapper", "application/vnd.simtech-mindmapper",
"application/vnd.smaf", "application/vnd.smart.teacher", "application/vnd.solent.sdkm+xml",
"application/vnd.solent.sdkm+xml", "application/vnd.spotfire.dxp",
"application/vnd.spotfire.sfs", "application/vnd.stardivision.calc",
"application/vnd.stardivision.draw", "application/vnd.stardivision.impress",
"application/vnd.stardivision.impress", "application/vnd.stardivision.math",
"application/vnd.stardivision.writer", "application/vnd.stardivision.writer",
"application/vnd.stardivision.writer-global", "application/vnd.stepmania.package",
"application/vnd.stepmania.stepchart", "application/vnd.sun.xml.calc",
"application/vnd.sun.xml.calc.template", "application/vnd.sun.xml.draw",
"application/vnd.sun.xml.draw.template", "application/vnd.sun.xml.impress",
"application/vnd.sun.xml.impress.template", "application/vnd.sun.xml.math",
"application/vnd.sun.xml.writer", "application/vnd.sun.xml.writer.global",
"application/vnd.sun.xml.writer.template", "application/vnd.sus-calendar",
"application/vnd.sus-calendar", "application/vnd.svd", "application/vnd.symbian.install",
"application/vnd.symbian.install", "application/vnd.syncml+xml",
"application/vnd.syncml.dm+wbxml", "application/vnd.syncml.dm+xml",
"application/vnd.tao.intent-module-archive", "application/vnd.tcpdump.pcap",
"application/vnd.tcpdump.pcap", "application/vnd.tcpdump.pcap",
"application/vnd.tmobile-livetv", "application/vnd.trid.tpt",
"application/vnd.triscape.mxs", "application/vnd.trueapp", "application/vnd.ufdl",
"application/vnd.ufdl", "application/vnd.uiq.theme", "application/vnd.umajin",
"application/vnd.unity", "application/vnd.uoml+xml", "application/vnd.vcx",
"application/vnd.visio", "application/vnd.visio", "application/vnd.visio",
"application/vnd.visio", "application/vnd.visionary", "application/vnd.vsf",
"application/vnd.wap.sic", "application/vnd.wap.slc", "application/vnd.wap.wbxml",
"application/vnd.wap.wmlc", "application/vnd.wap.wmlscriptc",
"application/vnd.webturbo", "application/vnd.wolfram.player",
"application/vnd.wordperfect", "application/vnd.wqd", "application/vnd.wt.stf",
"application/vnd.xara", "application/vnd.xara", "application/vnd.xfdl",
"application/vnd.yamaha.hv-dic", "application/vnd.yamaha.hv-script",
"application/vnd.yamaha.hv-voice", "application/vnd.yamaha.openscoreformat",
"application/vnd.yamaha.openscoreformat.osfpvg+xml", "application/vnd.yamaha.smaf-audio",
"application/vnd.yamaha.smaf-phrase", "application/vnd.yellowriver-custom-menu",
"application/vnd.zul", "application/vnd.zul", "application/vnd.zzazz.deck+xml",
"application/vocaltec-media-desc", "application/vocaltec-media-file",
"application/voicexml+xml", "application/widget", "application/winhlp",
"application/wordperfect", "application/wordperfect", "application/wordperfect",
"application/wordperfect", "application/wordperfect5.1", "application/wordperfect6.0",
"application/wordperfect6.0", "application/wordperfect6.1", "application/wsdl+xml",
"application/wspolicy+xml", "application/x-123", "application/x-123",
"application/x-7z-compressed", "application/x-abiword", "application/x-ace-compressed",
"application/x-aim", "application/x-apple-diskimage", "application/x-authorware-bin",
"application/x-authorware-bin", "application/x-authorware-bin",
"application/x-authorware-bin", "application/x-authorware-map",
"application/x-authorware-seg", "application/x-bcpio", "application/x-binary",
"application/x-binhex40", "application/x-bittorrent", "application/x-blorb",
"application/x-blorb", "application/x-bsh", "application/x-bsh",
"application/x-bsh", "application/x-bytecode.elisp", "application/x-bytecode.elisp(compiledelisp)",
"application/x-bzip", "application/x-bzip2", "application/x-bzip2",
"application/x-cbr", "application/x-cbr", "application/x-cbr",
"application/x-cbr", "application/x-cbr", "application/x-cdf",
"application/x-cdlink", "application/x-cfs-compressed", "application/x-chat",
"application/x-chat", "application/x-chess-pgn", "application/x-chm",
"application/x-chrome-extension", "application/x-cmu-raster",
"application/x-cocoa", "application/x-compactpro", "application/x-compress",
"application/x-compressed", "application/x-compressed", "application/x-compressed",
"application/x-compressed", "application/x-conference", "application/x-cpio",
"application/x-cpt", "application/x-csh", "application/x-debian-package",
"application/x-debian-package", "application/x-deepv", "application/x-dgc-compressed",
"application/x-director", "application/x-director", "application/x-director",
"application/x-director", "application/x-director", "application/x-director",
"application/x-director", "application/x-director", "application/x-director",
"application/x-dms", "application/x-doom", "application/x-dtbncx+xml",
"application/x-dtbook+xml", "application/x-dtbresource+xml",
"application/x-dvi", "application/x-elc", "application/x-envoy",
"application/x-envoy", "application/x-esrehber", "application/x-eva",
"application/x-excel", "application/x-excel", "application/x-excel",
"application/x-excel", "application/x-excel", "application/x-excel",
"application/x-excel", "application/x-excel", "application/x-excel",
"application/x-excel", "application/x-excel", "application/x-flac",
"application/x-font", "application/x-font", "application/x-font",
"application/x-font", "application/x-font", "application/x-font-bdf",
"application/x-font-ghostscript", "application/x-font-linux-psf",
"application/x-font-otf", "application/x-font-pcf", "application/x-font-snf",
"application/x-font-ttf", "application/x-font-ttf", "application/x-font-type1",
"application/x-font-type1", "application/x-font-type1", "application/x-font-type1",
"application/x-font-woff", "application/x-frame", "application/x-freearc",
"application/x-freelance", "application/x-futuresplash", "application/x-gca-compressed",
"application/x-glulx", "application/x-gnumeric", "application/x-go-sgf",
"application/x-gramps-xml", "application/x-graphing-calculator",
"application/x-gsp", "application/x-gss", "application/x-gtar",
"application/x-gtar", "application/x-gtar", "application/x-gzip",
"application/x-gzip", "application/x-gzip", "application/x-hdf",
"application/x-helpfile", "application/x-helpfile", "application/x-httpd-imap",
"application/x-httpd-php", "application/x-httpd-php", "application/x-httpd-php",
"application/x-httpd-php-source", "application/x-httpd-php3",
"application/x-httpd-php3-preprocessed", "application/x-httpd-php4",
"application/x-ica", "application/x-ima", "application/x-install-instructions",
"application/x-internet-signup", "application/x-internet-signup",
"application/x-internett-signup", "application/x-inventor", "application/x-ip2",
"application/x-iphone", "application/x-iso9660-image", "application/x-java-archive",
"application/x-java-class", "application/x-java-commerce", "application/x-java-jnlp-file",
"application/x-java-serialized-object", "application/x-java-vm",
"application/x-javascript", "application/x-kchart", "application/x-killustrator",
"application/x-koan", "application/x-koan", "application/x-koan",
"application/x-koan", "application/x-kpresenter", "application/x-kpresenter",
"application/x-ksh", "application/x-kspread", "application/x-kword",
"application/x-kword", "application/x-latex", "application/x-latex",
"application/x-lha", "application/x-lisp", "application/x-livescreen",
"application/x-lotus", "application/x-lotusscreencam", "application/x-lua-bytecode",
"application/x-lzh", "application/x-lzh-compressed", "application/x-lzh-compressed",
"application/x-lzx", "application/x-mac-binhex40", "application/x-macbinary",
"application/x-magic-cap-package-1.0", "application/x-maker",
"application/x-maker", "application/x-maker", "application/x-maker",
"application/x-maker", "application/x-maker", "application/x-maker",
"application/x-mathcad", "application/x-meme", "application/x-midi",
"application/x-midi", "application/x-mie", "application/x-mif",
"application/x-mix-transfer", "application/x-mobipocket-ebook",
"application/x-mobipocket-ebook", "application/x-mpegURL", "application/x-mplayer2",
"application/x-ms-application", "application/x-ms-shortcut",
"application/x-ms-wmd", "application/x-ms-wmz", "application/x-ms-xbap",
"application/x-msaccess", "application/x-msbinder", "application/x-mscardfile",
"application/x-msclip", "application/x-msdos-program", "application/x-msdos-program",
"application/x-msdos-program", "application/x-msdos-program",
"application/x-msdownload", "application/x-msdownload", "application/x-msdownload",
"application/x-msdownload", "application/x-msdownload", "application/x-msexcel",
"application/x-msexcel", "application/x-msexcel", "application/x-msi",
"application/x-msmediaview", "application/x-msmediaview", "application/x-msmediaview",
"application/x-msmetafile", "application/x-msmetafile", "application/x-msmetafile",
"application/x-msmetafile", "application/x-msmoney", "application/x-mspowerpoint",
"application/x-mspublisher", "application/x-msschedule", "application/x-msterminal",
"application/x-mswrite", "application/x-navi-animation", "application/x-navidoc",
"application/x-navimap", "application/x-navistyle", "application/x-netcdf",
"application/x-netcdf", "application/x-newton-compatible-pkg",
"application/x-nokia-9000-communicator-add-on-software", "application/x-ns-proxy-autoconfig",
"application/x-nwc", "application/x-nzb", "application/x-object",
"application/x-omc", "application/x-omcdatamaker", "application/x-omcregerator",
"application/x-oz-application", "application/x-pagemaker", "application/x-pagemaker",
"application/x-pcl", "application/x-perfmon", "application/x-perfmon",
"application/x-perfmon", "application/x-perfmon", "application/x-perfmon",
"application/x-pixclscript", "application/x-pkcs10", "application/x-pkcs12",
"application/x-pkcs12", "application/x-pkcs7-certificates", "application/x-pkcs7-certificates",
"application/x-pkcs7-certreqresp", "application/x-pkcs7-crl",
"application/x-pkcs7-mime", "application/x-pkcs7-mime", "application/x-pkcs7-signature",
"application/x-pkcs7-signature", "application/x-pointplus", "application/x-portable-anymap",
"application/x-project", "application/x-project", "application/x-project",
"application/x-project", "application/x-python-code", "application/x-python-code",
"application/x-qpro", "application/x-quicktimeplayer", "application/x-rar-compressed",
"application/x-redhat-package-manager", "application/x-research-info-systems",
"application/x-rpm", "application/x-rtf", "application/x-sdp",
"application/x-sea", "application/x-seelogo", "application/x-sh",
"application/x-shar", "application/x-shar", "application/x-shockwave-flash",
"application/x-shockwave-flash", "application/x-silverlight-app",
"application/x-sit", "application/x-sprite", "application/x-sprite",
"application/x-sql", "application/x-stuffit", "application/x-stuffitx",
"application/x-subrip", "application/x-sv4cpio", "application/x-sv4crc",
"application/x-t3vm-image", "application/x-tads", "application/x-tar",
"application/x-tbook", "application/x-tbook", "application/x-tcl",
"application/x-tex", "application/x-tex-gf", "application/x-tex-pk",
"application/x-tex-tfm", "application/x-texinfo", "application/x-texinfo",
"application/x-tgif", "application/x-trash", "application/x-trash",
"application/x-trash", "application/x-trash", "application/x-trash",
"application/x-troff", "application/x-troff", "application/x-troff",
"application/x-troff-man", "application/x-troff-me", "application/x-troff-ms",
"application/x-troff-msvideo", "application/x-ustar", "application/x-visio",
"application/x-visio", "application/x-visio", "application/x-vnd.audioexplosion.mzz",
"application/x-vnd.ls-xpix", "application/x-vrml", "application/x-wais-source",
"application/x-wais-source", "application/x-web-app-manifest+json",
"application/x-wingz", "application/x-winhelp", "application/x-wintalk",
"application/x-world", "application/x-world", "application/x-wpwin",
"application/x-wri", "application/x-x509-ca-cert", "application/x-x509-ca-cert",
"application/x-x509-ca-cert", "application/x-x509-user-cert",
"application/x-xcf", "application/x-xfig", "application/x-xliff+xml",
"application/x-xpinstall", "application/x-xz", "application/x-zip-compressed",
"application/x-zmachine", "application/x-zmachine", "application/x-zmachine",
"application/x-zmachine", "application/x-zmachine", "application/x-zmachine",
"application/x-zmachine", "application/x-zmachine", "application/xaml+xml",
"application/xcap-diff+xml", "application/xenc+xml", "application/xhtml+xml",
"application/xhtml+xml", "application/xml", "application/xml",
"application/xml", "application/xml-dtd", "application/xop+xml",
"application/xproc+xml", "application/xslt+xml", "application/xspf+xml",
"application/xv+xml", "application/xv+xml", "application/xv+xml",
"application/xv+xml", "application/yang", "application/yin+xml",
"application/ynd.ms-pkipko", "application/zip", "audio/adpcm",
"audio/aiff", "audio/aiff", "audio/aiff", "audio/basic", "audio/basic",
"audio/flac", "audio/it", "audio/make", "audio/make", "audio/make",
"audio/make.my.funk", "audio/mid", "audio/mid", "audio/midi",
"audio/midi", "audio/midi", "audio/midi", "audio/mod", "audio/mp4",
"audio/mp4", "audio/mpeg", "audio/mpeg", "audio/mpeg", "audio/mpeg",
"audio/mpeg", "audio/mpeg", "audio/mpeg", "audio/mpeg", "audio/mpeg",
"audio/mpeg", "audio/mpeg3", "audio/mpegurl", "audio/nspaudio",
"audio/nspaudio", "audio/ogg", "audio/ogg", "audio/ogg", "audio/prs.sid",
"audio/s3m", "audio/silk", "audio/tsp-audio", "audio/tsplayer",
"audio/vnd.dece.audio", "audio/vnd.dece.audio", "audio/vnd.digital-winds",
"audio/vnd.dra", "audio/vnd.dts", "audio/vnd.dts.hd", "audio/vnd.lucent.voice",
"audio/vnd.ms-playready.media.pya", "audio/vnd.nuera.ecelp4800",
"audio/vnd.nuera.ecelp7470", "audio/vnd.nuera.ecelp9600", "audio/vnd.qcelp",
"audio/vnd.rip", "audio/voc", "audio/voxware", "audio/wav", "audio/webm",
"audio/x-aac", "audio/x-adpcm", "audio/x-aiff", "audio/x-aiff",
"audio/x-aiff", "audio/x-au", "audio/x-caf", "audio/x-flac",
"audio/x-gsm", "audio/x-gsm", "audio/x-jam", "audio/x-liveaudio",
"audio/x-matroska", "audio/x-mid", "audio/x-mid", "audio/x-midi",
"audio/x-midi", "audio/x-mod", "audio/x-mpeg", "audio/x-mpeg-3",
"audio/x-mpegurl", "audio/x-mpequrl", "audio/x-ms-wax", "audio/x-ms-wma",
"audio/x-nspaudio", "audio/x-nspaudio", "audio/x-pn-realaudio",
"audio/x-pn-realaudio", "audio/x-pn-realaudio", "audio/x-pn-realaudio",
"audio/x-pn-realaudio", "audio/x-pn-realaudio-plugin", "audio/x-pn-realaudio-plugin",
"audio/x-pn-realaudio-plugin", "audio/x-psid", "audio/x-realaudio",
"audio/x-scpls", "audio/x-sd2", "audio/x-twinvq", "audio/x-twinvq-plugin",
"audio/x-twinvq-plugin", "audio/x-vnd.audioexplosion.mjuicemediafile",
"audio/x-voc", "audio/x-wav", "audio/xm", "chemical/x-cdx", "chemical/x-cif",
"chemical/x-cmdf", "chemical/x-cml", "chemical/x-csml", "chemical/x-pdb",
"chemical/x-pdb", "chemical/x-xyz", "drawing/x-dwf", "drawing/x-dwf(old)",
"font/opentype", "i-world/i-vrml", "image/bmp", "image/bmp",
"image/cgm", "image/cis-cod", "image/cmu-raster", "image/cmu-raster",
"image/fif", "image/florian", "image/florian", "image/g3fax",
"image/gif", "image/ief", "image/ief", "image/jpeg", "image/jpeg",
"image/jpeg", "image/jpeg", "image/jpeg", "image/jutvision",
"image/ktx", "image/naplps", "image/naplps", "image/pcx", "image/pict",
"image/pict", "image/pipeg", "image/pjpeg", "image/pjpeg", "image/pjpeg",
"image/pjpeg", "image/png", "image/png", "image/prs.btif", "image/sgi",
"image/svg+xml", "image/svg+xml", "image/tiff", "image/tiff",
"image/vasa", "image/vnd.adobe.photoshop", "image/vnd.dece.graphic",
"image/vnd.dece.graphic", "image/vnd.dece.graphic", "image/vnd.dece.graphic",
"image/vnd.djvu", "image/vnd.djvu", "image/vnd.dvb.subtitle",
"image/vnd.dwg", "image/vnd.dwg", "image/vnd.dwg", "image/vnd.dxf",
"image/vnd.fastbidsheet", "image/vnd.fpx", "image/vnd.fpx", "image/vnd.fst",
"image/vnd.fujixerox.edmics-mmr", "image/vnd.fujixerox.edmics-rlc",
"image/vnd.ms-modi", "image/vnd.ms-photo", "image/vnd.net-fpx",
"image/vnd.net-fpx", "image/vnd.rn-realflash", "image/vnd.rn-realpix",
"image/vnd.wap.wbmp", "image/vnd.xiff", "image/webp", "image/x-3ds",
"image/x-cmu-rast", "image/x-cmu-raster", "image/x-cmx", "image/x-coreldraw",
"image/x-coreldrawpattern", "image/x-coreldrawtemplate", "image/x-corelphotopaint",
"image/x-dwg", "image/x-dwg", "image/x-dwg", "image/x-freehand",
"image/x-freehand", "image/x-freehand", "image/x-freehand", "image/x-freehand",
"image/x-icon", "image/x-jg", "image/x-jng", "image/x-jps", "image/x-mrsid-image",
"image/x-ms-bmp", "image/x-niff", "image/x-niff", "image/x-pcx",
"image/x-photoshop", "image/x-pict", "image/x-pict", "image/x-portable-anymap",
"image/x-portable-bitmap", "image/x-portable-graymap", "image/x-portable-greymap",
"image/x-portable-pixmap", "image/x-quicktime", "image/x-quicktime",
"image/x-quicktime", "image/x-rgb", "image/x-tga", "image/x-tiff",
"image/x-tiff", "image/x-windows-bmp", "image/x-xbitmap", "image/x-xbitmap",
"image/x-xbm", "image/x-xpixmap", "image/x-xpixmap", "image/x-xwd",
"image/x-xwindowdump", "image/xbm", "image/xpm", "message/rfc822",
"message/rfc822", "message/rfc822", "message/rfc822", "message/rfc822",
"model/iges", "model/iges", "model/mesh", "model/mesh", "model/mesh",
"model/vnd.collada+xml", "model/vnd.dwf", "model/vnd.gdl", "model/vnd.gtw",
"model/vnd.mts", "model/vnd.vtu", "model/vrml", "model/vrml",
"model/vrml", "model/x-pov", "model/x3d+binary", "model/x3d+binary",
"model/x3d+vrml", "model/x3d+vrml", "model/x3d+xml", "model/x3d+xml",
"multipart/x-gzip", "multipart/x-ustar", "multipart/x-zip", "music/crescendo",
"music/crescendo", "music/x-karaoke", "paleovu/x-pv", "text/asp",
"text/cache-manifest", "text/cache-manifest", "text/calendar",
"text/calendar", "text/calendar", "text/comma-separated-values",
"text/css", "text/csv", "text/ecmascript", "text/event-stream",
"text/h323", "text/html", "text/html", "text/html", "text/html",
"text/html", "text/html", "text/html", "text/iuls", "text/javascript",
"text/mathml", "text/mcf", "text/n3", "text/pascal", "text/plain",
"text/plain", "text/plain", "text/plain", "text/plain", "text/plain",
"text/plain", "text/plain", "text/plain", "text/plain", "text/plain",
"text/plain", "text/plain", "text/plain", "text/plain", "text/plain",
"text/plain", "text/plain", "text/plain", "text/plain", "text/plain",
"text/plain", "text/plain", "text/plain", "text/plain", "text/plain",
"text/plain", "text/plain", "text/plain", "text/plain", "text/plain",
"text/plain", "text/plain-bas", "text/prs.lines.tag", "text/richtext",
"text/richtext", "text/richtext", "text/rtf", "text/scriplet",
"text/scriptlet", "text/scriptlet", "text/sgml", "text/sgml",
"text/tab-separated-values", "text/texmacs", "text/texmacs",
"text/troff", "text/troff", "text/troff", "text/troff", "text/troff",
"text/troff", "text/turtle", "text/uri-list", "text/uri-list",
"text/uri-list", "text/uri-list", "text/uri-list", "text/vcard",
"text/vnd.abc", "text/vnd.curl", "text/vnd.curl.dcurl", "text/vnd.curl.mcurl",
"text/vnd.curl.scurl", "text/vnd.dvb.subtitle", "text/vnd.fly",
"text/vnd.fmi.flexstor", "text/vnd.graphviz", "text/vnd.in3d.3dml",
"text/vnd.in3d.spot", "text/vnd.rn-realtext", "text/vnd.sun.j2me.app-descriptor",
"text/vnd.wap.si", "text/vnd.wap.sl", "text/vnd.wap.wml", "text/vnd.wap.wmlscript",
"text/vtt", "text/webviewhtml", "text/x-asm", "text/x-asm", "text/x-audiosoft-intra",
"text/x-c", "text/x-c", "text/x-c", "text/x-c", "text/x-c", "text/x-c",
"text/x-c", "text/x-c++hdr", "text/x-c++hdr", "text/x-c++hdr",
"text/x-c++hdr", "text/x-c++src", "text/x-c++src", "text/x-c++src",
"text/x-c++src", "text/x-chdr", "text/x-component", "text/x-csh",
"text/x-csrc", "text/x-fortran", "text/x-fortran", "text/x-fortran",
"text/x-fortran", "text/x-h", "text/x-h", "text/x-java", "text/x-java-source",
"text/x-java-source", "text/x-la-asf", "text/x-lua", "text/x-m",
"text/x-markdown", "text/x-markdown", "text/x-markdown", "text/x-moc",
"text/x-nfo", "text/x-opml", "text/x-pascal", "text/x-pascal",
"text/x-pcs-gcd", "text/x-perl", "text/x-perl", "text/x-python",
"text/x-script", "text/x-script.csh", "text/x-script.elisp",
"text/x-script.guile", "text/x-script.ksh", "text/x-script.lisp",
"text/x-script.perl", "text/x-script.perl-module", "text/x-script.phyton",
"text/x-script.rexx", "text/x-script.scheme", "text/x-script.sh",
"text/x-script.tcl", "text/x-script.tcsh", "text/x-script.zsh",
"text/x-server-parsed-html", "text/x-server-parsed-html", "text/x-setext",
"text/x-sfv", "text/x-sgml", "text/x-sgml", "text/x-sh", "text/x-speech",
"text/x-speech", "text/x-tcl", "text/x-tcl", "text/x-tex", "text/x-tex",
"text/x-tex", "text/x-tex", "text/x-uil", "text/x-uuencode",
"text/x-uuencode", "text/x-vcalendar", "text/x-vcard", "text/xml",
"video/3gpp", "video/3gpp2", "video/MP2T", "video/animaflex",
"video/avi", "video/avs-video", "video/dl", "video/flc", "video/flc",
"video/fli", "video/fli", "video/gl", "video/h261", "video/h263",
"video/h264", "video/jpeg", "video/jpm", "video/jpm", "video/mj2",
"video/mj2", "video/mp4", "video/mp4", "video/mp4", "video/mpeg",
"video/mpeg", "video/mpeg", "video/mpeg", "video/mpeg", "video/mpeg",
"video/mpeg", "video/mpeg", "video/mpeg", "video/msvideo", "video/ogg",
"video/quicktime", "video/quicktime", "video/quicktime", "video/vdo",
"video/vivo", "video/vivo", "video/vnd.dece.hd", "video/vnd.dece.hd",
"video/vnd.dece.mobile", "video/vnd.dece.mobile", "video/vnd.dece.pd",
"video/vnd.dece.pd", "video/vnd.dece.sd", "video/vnd.dece.sd",
"video/vnd.dece.video", "video/vnd.dece.video", "video/vnd.dvb.file",
"video/vnd.fvt", "video/vnd.mpegurl", "video/vnd.mpegurl", "video/vnd.ms-playready.media.pyv",
"video/vnd.rn-realvideo", "video/vnd.uvvu.mp4", "video/vnd.uvvu.mp4",
"video/vnd.vivo", "video/vnd.vivo", "video/vosaic", "video/webm",
"video/x-amt-demorun", "video/x-amt-showrun", "video/x-atomic3d-feature",
"video/x-dl", "video/x-dv", "video/x-dv", "video/x-f4v", "video/x-fli",
"video/x-flv", "video/x-gl", "video/x-isvideo", "video/x-la-asf",
"video/x-la-asf", "video/x-m4v", "video/x-matroska", "video/x-matroska",
"video/x-matroska", "video/x-mng", "video/x-motion-jpeg", "video/x-mpeg",
"video/x-mpeg", "video/x-mpeq2a", "video/x-ms-asf", "video/x-ms-asf",
"video/x-ms-asf", "video/x-ms-asf-plugin", "video/x-ms-vob",
"video/x-ms-wm", "video/x-ms-wmv", "video/x-ms-wmx", "video/x-ms-wvx",
"video/x-msvideo", "video/x-qtc", "video/x-scm", "video/x-sgi-movie",
"video/x-sgi-movie", "video/x-smv", "windows/metafile", "www/mime",
"x-conference/x-cooltalk", "x-music/x-midi", "x-music/x-midi",
"x-world/x-3dmf", "x-world/x-3dmf", "x-world/x-3dmf", "x-world/x-3dmf",
"x-world/x-svr", "x-world/x-vrml", "x-world/x-vrml", "x-world/x-vrml",
"x-world/x-vrml", "x-world/x-vrml", "x-world/x-vrml", "x-world/x-vrml",
"x-world/x-vrt", "xgl/drawing", "xgl/movie", "application/mathematica",
"application/msword", "application/octet-stream", "application/onenote",
"application/pgp-signature", "application/pkcs7-mime", "application/postscript",
"application/smil+xml", "application/vnd.acucorp", "application/vnd.clonk.c4group",
"application/vnd.eszigno3+xml", "application/vnd.fdsn.seed",
"application/vnd.framemaker", "application/vnd.geometry-explorer",
"application/vnd.grafeq", "application/vnd.ibm.modcap", "application/vnd.iccprofile",
"application/vnd.intercon.formnet", "application/vnd.kahootz",
"application/vnd.kde.kpresenter", "application/vnd.kde.kword",
"application/vnd.kinar", "application/vnd.koan", "application/vnd.ms-excel",
"application/vnd.ms-powerpoint", "application/vnd.ms-project",
"application/vnd.ms-works", "application/vnd.palm", "application/vnd.quark.quarkxpress",
"application/vnd.simtech-mindmapper", "application/vnd.solent.sdkm+xml",
"application/vnd.sus-calendar", "application/vnd.symbian.install",
"application/vnd.ufdl", "application/vnd.visio", "application/vnd.zul",
"application/x-authorware-bin", "application/x-bzip2", "application/x-debian-package",
"application/x-director", "application/x-font-ttf", "application/x-font-type1",
"application/x-mobipocket-ebook", "application/x-msdownload",
"application/x-msmediaview", "application/x-netcdf", "application/x-pkcs12",
"application/x-pkcs7-certificates", "application/x-texinfo",
"application/x-x509-ca-cert", "application/xhtml+xml", "application/xml",
"application/xv+xml", "audio/basic", "audio/midi", "audio/mpeg",
"audio/ogg", "audio/x-aiff", "audio/x-pn-realaudio", "image/jpeg",
"image/svg+xml", "image/tiff", "image/vnd.djvu", "image/x-freehand",
"image/x-pict", "message/rfc822", "model/iges", "model/mesh",
"model/vrml", "text/calendar", "text/html", "text/plain", "text/sgml",
"text/troff", "text/uri-list", "text/x-asm", "text/x-c", "text/x-fortran",
"text/x-pascal", "video/jpm", "video/mj2", "video/mp4", "video/mpeg",
"video/quicktime", "video/vnd.mpegurl", "video/x-ms-asf")), row.names = c(NA,
-1763L), class = c("tbl_df", "tbl", "data.frame"), .Names = c("extension",
"mime_type")) -> simplemagic_mime_db
| /R/aaa.R | no_license | hrbrmstr/simplemagic | R | false | false | 58,667 | r | #' File extension-to-MIME mapping data frame
#'
#' @docType data
#' @export
structure(list(extension = c("pyc", "dwg", "ez", "aw", "arj",
"atom", "xml", "atomcat", "atomsvc", "mm", "mme", "hqx", "hqx",
"boo", "book", "ccxml", "cdf", "cdmia", "cdmic", "cdmid", "cdmio",
"cdmiq", "ccad", "dp", "cu", "csm", "davmount", "dbk", "drw",
"tsp", "dssc", "xdssc", "dxf", "es", "ecma", "js", "emma", "evy",
"epub", "xl", "xla", "xlb", "xlc", "xld", "xlk", "xll", "xlm",
"xls", "xlt", "xlv", "xlw", "exi", "pfr", "woff", "fif", "frl",
"spl", "gml", "tgz", "gpx", "vew", "gxf", "hlp", "hta", "stk",
"unv", "iges", "igs", "inf", "ink", "inkml", "acx", "ipfix",
"class", "jar", "class", "ser", "class", "js", "json", "jsonml",
"lha", "lostxml", "lzx", "bin", "hqx", "hqx", "cpt", "bin", "mads",
"mrc", "mrcx", "ma", "nb", "mb", "mathml", "mbd", "mbox", "mcd",
"mscml", "metalink", "meta4", "mets", "aps", "mods", "m21", "mp21",
"mp4", "m4p", "mp4s", "mdb", "one", "onetoc2", "onetmp", "onepkg",
"pot", "pps", "ppt", "ppz", "doc", "dot", "w6w", "wiz", "word",
"wri", "mxf", "mcp", "bin", "dms", "lrf", "mar", "so", "dist",
"distz", "pkg", "bpk", "dump", "elc", "a", "arc", "arj", "com",
"exe", "lha", "lhx", "lzh", "lzx", "o", "psd", "saveme", "uu",
"zoo", "class", "buffer", "deploy", "hqx", "obj", "lib", "zip",
"gz", "dmg", "iso", "oda", "opf", "ogx", "ogg", "axs", "omdoc",
"onetoc", "onetoc2", "onetmp", "onepkg", "oxps", "xer", "pdf",
"pgp", "key", "asc", "pgp", "sig", "prf", "p12", "crl", "p10",
"p7m", "p7c", "p7s", "p8", "ac", "cer", "crt", "crl", "pkipath",
"pki", "text", "pls", "ai", "eps", "ps", "ppt", "part", "prt",
"cww", "pskcxml", "rar", "rdf", "rif", "rnc", "rl", "rld", "rng",
"rs", "gbr", "mft", "roa", "rsd", "rss", "xml", "rtf", "rtx",
"sbml", "scq", "scs", "spq", "spp", "sdp", "sea", "set", "setpay",
"setreg", "shf", "stl", "smi", "smil", "smi", "smil", "sol",
"sdr", "rq", "srx", "gram", "grxml", "sru", "ssdl", "ssml", "step",
"stp", "ssm", "tei", "teicorpus", "tfi", "tsd", "tbk", "vda",
"plb", "psb", "pvb", "tcap", "pwn", "aso", "imp", "acu", "atc",
"acutc", "air", "fcdt", "fxp", "fxpl", "xdp", "xfdf", "ahead",
"azf", "azs", "azw", "acc", "ami", "apk", "cii", "fti", "atx",
"mpkg", "m3u8", "swi", "swi", "iota", "aep", "mpm", "bmi", "rep",
"cdxml", "mmd", "cdy", "cla", "rp9", "c4g", "c4d", "c4f", "c4p",
"c4u", "c11amc", "c11amz", "csp", "cdbcmsg", "cmc", "clkx", "clkk",
"clkp", "clkt", "clkw", "wbs", "pml", "ppd", "car", "pcurl",
"dart", "rdz", "uvf", "uvvf", "uvd", "uvvd", "uvt", "uvvt", "uvx",
"uvvx", "uvz", "uvvz", "fe_launch", "dna", "mlp", "dpg", "dfac",
"kpxx", "ait", "svc", "geo", "mag", "nml", "esf", "msf", "qam",
"slt", "ssf", "es3", "et3", "ez2", "ez3", "fdf", "mseed", "seed",
"dataless", "gph", "ftc", "fm", "frame", "maker", "book", "fnc",
"ltf", "fsc", "oas", "oa2", "oa3", "fg5", "bh2", "ddd", "xdw",
"xbd", "fzs", "txd", "ggb", "ggt", "gex", "gre", "gxt", "g2w",
"g3w", "gmx", "kml", "kmz", "gqf", "gqs", "gac", "ghf", "gim",
"grv", "gtm", "tpl", "vcg", "hal", "zmm", "hbci", "les", "hgl",
"hpg", "hpgl", "hpid", "hps", "jlt", "pcl", "pclxl", "sfd-hdstx",
"x3d", "mpy", "afp", "listafp", "list3820", "irm", "sc", "icc",
"icm", "igl", "ivp", "ivu", "igm", "xpw", "xpx", "i2g", "qbo",
"qfx", "rcprofile", "irp", "xpr", "fcs", "jam", "rms", "jisp",
"joda", "ktz", "ktr", "karbon", "chrt", "kfo", "flw", "kon",
"kpr", "kpt", "ksp", "kwd", "kwt", "htke", "kia", "kne", "knp",
"skp", "skd", "skt", "skm", "sse", "lasxml", "lbd", "lbe", "123",
"apr", "pre", "nsf", "org", "scm", "lwp", "portpkg", "mcd", "mc1",
"cdkey", "mwf", "mfm", "flo", "igx", "mif", "daf", "dis", "mbk",
"mqy", "msl", "plc", "txf", "mpn", "mpc", "xul", "cil", "cab",
"xls", "xlm", "xla", "xlc", "xlt", "xlb", "xll", "xlw", "xlam",
"xlam", "xlsb", "xlsb", "xlsm", "xlsm", "xltm", "xltm", "eot",
"chm", "ims", "lrm", "thmx", "msg", "sst", "pko", "cat", "stl",
"sst", "cat", "stl", "ppt", "pps", "pot", "ppa", "pwz", "ppam",
"ppam", "pptm", "potm", "pptm", "potm", "sldm", "sldm", "ppsm",
"ppsm", "potm", "potm", "mpp", "mpt", "docm", "docm", "dotm",
"dotm", "wps", "wks", "wcm", "wdb", "wpl", "xps", "mseq", "mus",
"msty", "taglet", "nlu", "ntf", "nitf", "nnd", "nns", "nnw",
"ncm", "ngdat", "n-gage", "rpst", "rpss", "rng", "edm", "edx",
"ext", "edm", "edx", "ext", "odc", "otc", "odb", "odf", "odft",
"odg", "otg", "odi", "oti", "odp", "otp", "ods", "ots", "odt",
"odm", "otm", "ott", "oth", "xo", "dd2", "oxt", "pptx", "sldx",
"ppsx", "potx", "xlsx", "xltx", "docx", "dotx", "mgp", "dp",
"esa", "pdb", "pqa", "oprc", "paw", "str", "ei6", "efif", "wg",
"plf", "pbd", "box", "mgz", "qps", "ptid", "qxd", "qxt", "qwd",
"qwt", "qxl", "qxb", "bed", "mxl", "musicxml", "cryptonote",
"cod", "rm", "rmvb", "rnx", "link66", "st", "see", "sema", "semd",
"semf", "ifm", "itp", "iif", "ipk", "twd", "twds", "mmf", "teacher",
"sdkm", "sdkd", "dxp", "sfs", "sdc", "sda", "sdd", "sdp", "smf",
"sdw", "vor", "sgl", "smzip", "sm", "sxc", "stc", "sxd", "std",
"sxi", "sti", "sxm", "sxw", "sxg", "stw", "sus", "susp", "svd",
"sis", "sisx", "xsm", "bdm", "xdm", "tao", "pcap", "cap", "dmp",
"tmo", "tpt", "mxs", "tra", "ufd", "ufdl", "utz", "umj", "unityweb",
"uoml", "vcx", "vsd", "vst", "vss", "vsw", "vis", "vsf", "sic",
"slc", "wbxml", "wmlc", "wmlsc", "wtb", "nbp", "wpd", "wqd",
"stf", "xar", "web", "xfdl", "hvd", "hvs", "hvp", "osf", "osfpvg",
"saf", "spf", "cmp", "zir", "zirz", "zaz", "vmd", "vmf", "vxml",
"wgt", "hlp", "wp", "wp5", "wp6", "wpd", "wp5", "w60", "wp5",
"w61", "wsdl", "wspolicy", "wk1", "wk", "7z", "abw", "ace", "aim",
"dmg", "aab", "x32", "u32", "vox", "aam", "aas", "bcpio", "bin",
"hqx", "torrent", "blb", "blorb", "bsh", "sh", "shar", "elc",
"elc", "bz", "bz2", "boz", "cbr", "cba", "cbt", "cbz", "cb7",
"cdf", "vcd", "cfs", "chat", "cha", "pgn", "chm", "crx", "ras",
"cco", "cpt", "z", "gz", "tgz", "z", "zip", "nsc", "cpio", "cpt",
"csh", "deb", "udeb", "deepv", "dgc", "dir", "dcr", "dxr", "cst",
"cct", "cxt", "w3d", "fgd", "swa", "dms", "wad", "ncx", "dtb",
"res", "dvi", "elc", "env", "evy", "es", "eva", "xla", "xlb",
"xlc", "xld", "xlk", "xll", "xlm", "xls", "xlt", "xlv", "xlw",
"flac", "pfa", "pfb", "gsf", "pcf", "pcf.Z", "bdf", "gsf", "psf",
"otf", "pcf", "snf", "ttf", "ttc", "pfa", "pfb", "pfm", "afm",
"woff", "mif", "arc", "pre", "spl", "gca", "ulx", "gnumeric",
"sgf", "gramps", "gcf", "gsp", "gss", "gtar", "tgz", "taz", "gz",
"gzip", "tgz", "hdf", "help", "hlp", "imap", "phtml", "pht",
"php", "phps", "php3", "php3p", "php4", "ica", "ima", "install",
"ins", "isp", "ins", "iv", "ip", "iii", "iso", "jar", "class",
"jcm", "jnlp", "ser", "class", "js", "chrt", "kil", "skd", "skm",
"skp", "skt", "kpr", "kpt", "ksh", "ksp", "kwd", "kwt", "latex",
"ltx", "lha", "lsp", "ivy", "wq1", "scm", "luac", "lzh", "lzh",
"lha", "lzx", "hqx", "bin", "mc$", "frm", "maker", "frame", "fm",
"fb", "book", "fbdoc", "mcd", "mm", "mid", "midi", "mie", "mif",
"nix", "prc", "mobi", "m3u8", "asx", "application", "lnk", "wmd",
"wmz", "xbap", "mdb", "obd", "crd", "clp", "com", "exe", "bat",
"dll", "exe", "dll", "com", "bat", "msi", "xla", "xls", "xlw",
"msi", "mvb", "m13", "m14", "wmf", "wmz", "emf", "emz", "mny",
"ppt", "pub", "scd", "trm", "wri", "ani", "nvd", "map", "stl",
"nc", "cdf", "pkg", "aos", "pac", "nwc", "nzb", "o", "omc", "omcd",
"omcr", "oza", "pm4", "pm5", "pcl", "pma", "pmc", "pml", "pmr",
"pmw", "plx", "p10", "p12", "pfx", "p7b", "spc", "p7r", "crl",
"p7c", "p7m", "p7a", "p7s", "css", "pnm", "mpc", "mpt", "mpv",
"mpx", "pyc", "pyo", "wb1", "qtl", "rar", "rpm", "ris", "rpm",
"rtf", "sdp", "sea", "sl", "sh", "shar", "sh", "swf", "swfl",
"xap", "sit", "spr", "sprite", "sql", "sit", "sitx", "srt", "sv4cpio",
"sv4crc", "t3", "gam", "tar", "sbk", "tbk", "tcl", "tex", "gf",
"pk", "tfm", "texinfo", "texi", "obj", "~", "%", "bak", "old",
"sik", "roff", "t", "tr", "man", "me", "ms", "avi", "ustar",
"vsd", "vst", "vsw", "mzz", "xpix", "vrml", "src", "wsrc", "webapp",
"wz", "hlp", "wtk", "svr", "wrl", "wpd", "wri", "der", "cer",
"crt", "crt", "xcf", "fig", "xlf", "xpi", "xz", "zip", "z1",
"z2", "z3", "z4", "z5", "z6", "z7", "z8", "xaml", "xdf", "xenc",
"xhtml", "xht", "xml", "xsl", "xpdl", "dtd", "xop", "xpl", "xslt",
"xspf", "mxml", "xhvml", "xvml", "xvm", "yang", "yin", "pko",
"zip", "adp", "aif", "aifc", "aiff", "au", "snd", "flac", "it",
"funk", "my", "pfunk", "pfunk", "rmi", "mid", "mid", "midi",
"kar", "rmi", "mod", "mp4a", "m4a", "mpga", "mp2", "mp2a", "mp3",
"m2a", "mpa", "mpg", "m3a", "mpega", "m4a", "mp3", "m3u", "la",
"lma", "oga", "ogg", "spx", "sid", "s3m", "sil", "tsi", "tsp",
"uva", "uvva", "eol", "dra", "dts", "dtshd", "lvp", "pya", "ecelp4800",
"ecelp7470", "ecelp9600", "qcp", "rip", "voc", "vox", "wav",
"weba", "aac", "snd", "aif", "aiff", "aifc", "au", "caf", "flac",
"gsd", "gsm", "jam", "lam", "mka", "mid", "midi", "mid", "midi",
"mod", "mp2", "mp3", "m3u", "m3u", "wax", "wma", "la", "lma",
"ram", "ra", "rm", "rmm", "rmp", "rmp", "ra", "rpm", "sid", "ra",
"pls", "sd2", "vqf", "vqe", "vql", "mjf", "voc", "wav", "xm",
"cdx", "cif", "cmdf", "cml", "csml", "pdb", "xyz", "xyz", "dwf",
"dwf", "otf", "ivr", "bmp", "bm", "cgm", "cod", "ras", "rast",
"fif", "flo", "turbot", "g3", "gif", "ief", "iefs", "jpeg", "jpg",
"jfif", "jfif-tbnl", "jpe", "jut", "ktx", "nap", "naplps", "pcx",
"pic", "pict", "jfif", "jfif", "jpe", "jpeg", "jpg", "png", "x-png",
"btif", "sgi", "svg", "svgz", "tiff", "tif", "mcf", "psd", "uvi",
"uvvi", "uvg", "uvvg", "djvu", "djv", "sub", "dwg", "dxf", "svf",
"dxf", "fbs", "fpx", "fpix", "fst", "mmr", "rlc", "mdi", "wdp",
"npx", "fpx", "rf", "rp", "wbmp", "xif", "webp", "3ds", "ras",
"ras", "cmx", "cdr", "pat", "cdt", "cpt", "dwg", "dxf", "svf",
"fh", "fhc", "fh4", "fh5", "fh7", "ico", "art", "jng", "jps",
"sid", "bmp", "nif", "niff", "pcx", "psd", "pic", "pct", "pnm",
"pbm", "pgm", "pgm", "ppm", "qif", "qti", "qtif", "rgb", "tga",
"tif", "tiff", "bmp", "xbm", "xpm", "xbm", "xpm", "pm", "xwd",
"xwd", "xbm", "xpm", "eml", "mht", "mhtml", "mime", "nws", "igs",
"iges", "msh", "mesh", "silo", "dae", "dwf", "gdl", "gtw", "mts",
"vtu", "wrl", "vrml", "wrz", "pov", "x3db", "x3dbz", "x3dv",
"x3dvz", "x3d", "x3dz", "gzip", "ustar", "zip", "mid", "midi",
"kar", "pvu", "asp", "appcache", "manifest", "ics", "ifb", "icz",
"csv", "css", "csv", "js", "event-stream", "323", "html", "acgi",
"htm", "htmls", "htx", "shtml", "stm", "uls", "js", "mml", "mcf",
"n3", "pas", "txt", "text", "conf", "def", "list", "log", "c",
"c++", "cc", "com", "cxx", "f", "f90", "for", "g", "h", "hh",
"idc", "jav", "java", "lst", "m", "mar", "pl", "sdml", "bas",
"in", "asc", "diff", "pot", "el", "ksh", "par", "dsc", "rtx",
"rt", "rtf", "rtf", "wsc", "sct", "wsc", "sgml", "sgm", "tsv",
"tm", "ts", "t", "tr", "roff", "man", "me", "ms", "ttl", "uri",
"uris", "uni", "unis", "urls", "vcard", "abc", "curl", "dcurl",
"mcurl", "scurl", "sub", "fly", "flx", "gv", "3dml", "spot",
"rt", "jad", "si", "sl", "wml", "wmls", "vtt", "htt", "s", "asm",
"aip", "c", "cc", "cxx", "cpp", "h", "hh", "dic", "h++", "hpp",
"hxx", "hh", "c++", "cpp", "cxx", "cc", "h", "htc", "csh", "c",
"f", "for", "f77", "f90", "h", "hh", "java", "java", "jav", "lsx",
"lua", "m", "markdown", "md", "mkd", "moc", "nfo", "opml", "p",
"pas", "gcd", "pl", "pm", "py", "hlb", "csh", "el", "scm", "ksh",
"lsp", "pl", "pm", "py", "rexx", "scm", "sh", "tcl", "tcsh",
"zsh", "shtml", "ssi", "etx", "sfv", "sgm", "sgml", "sh", "spc",
"talk", "tcl", "tk", "tex", "ltx", "sty", "cls", "uil", "uu",
"uue", "vcs", "vcf", "xml", "3gp", "3g2", "ts", "afl", "avi",
"avs", "dl", "flc", "fli", "flc", "fli", "gl", "h261", "h263",
"h264", "jpgv", "jpm", "jpgm", "mj2", "mjp2", "mp4", "mp4v",
"mpg4", "mpeg", "mpg", "mpe", "m1v", "m2v", "mp2", "mp3", "mpa",
"mpv2", "avi", "ogv", "qt", "moov", "mov", "vdo", "viv", "vivo",
"uvh", "uvvh", "uvm", "uvvm", "uvp", "uvvp", "uvs", "uvvs", "uvv",
"uvvv", "dvb", "fvt", "mxu", "m4u", "pyv", "rv", "uvu", "uvvu",
"viv", "vivo", "vos", "webm", "xdr", "xsr", "fmf", "dl", "dif",
"dv", "f4v", "fli", "flv", "gl", "isu", "lsf", "lsx", "m4v",
"mkv", "mk3d", "mks", "mng", "mjpg", "mp2", "mp3", "mp2", "asf",
"asx", "asr", "asx", "vob", "wm", "wmv", "wmx", "wvx", "avi",
"qtc", "scm", "movie", "mv", "smv", "wmf", "mime", "ice", "mid",
"midi", "3dm", "3dmf", "qd3", "qd3d", "svr", "vrml", "wrl", "wrz",
"flr", "xaf", "xof", "vrm", "vrt", "xgz", "xmz", "ma nb mb",
"doc dot", "bin dms lha lrf lzh so iso dmg dist distz pkg bpk dump elc deploy",
"onetoc onetoc2 onetmp onepkg", "asc sig", "p7m p7c", "ai eps ps",
"smi smil", "atc acutc", "c4g c4d c4f c4p c4u", "es3 et3", "seed dataless",
"fm frame maker book", "gex gre", "gqf gqs", "afp listafp list3820",
"icc icm", "xpw xpx", "ktz ktr", "kpr kpt", "kwd kwt", "kne knp",
"skp skd skt skm", "xls xlm xla xlc xlt xlw", "ppt pps pot",
"mpp mpt", "wps wks wcm wdb", "pdb pqa oprc", "qxd qxt qwd qwt qxl qxb",
"twd twds", "sdkm sdkd", "sus susp", "sis sisx", "ufd ufdl",
"vsd vst vss vsw", "zir zirz", "aab x32 u32 vox", "bz2 boz",
"deb udeb", "dir dcr dxr cst cct cxt w3d fgd swa", "ttf ttc",
"pfa pfb pfm afm", "prc mobi", "exe dll com bat msi", "mvb m13 m14",
"nc cdf", "p12 pfx", "p7b spc", "texinfo texi", "der crt", "xhtml xht",
"xml xsl", "mxml xhvml xvml xvm", "au snd", "mid midi kar rmi",
"mpga mp2 mp2a mp3 m2a m3a", "oga ogg spx", "aif aiff aifc",
"ram ra", "jpeg jpg jpe", "svg svgz", "tiff tif", "djvu djv",
"fh fhc fh4 fh5 fh7", "pic pct", "eml mime", "igs iges", "msh mesh silo",
"wrl vrml", "ics ifb", "html htm", "txt text conf def list log in",
"sgml sgm", "t tr roff man me ms", "uri uris urls", "s asm",
"c cc cxx cpp h hh dic", "f for f77 f90", "p pas", "jpm jpgm",
"mj2 mjp2", "mp4 mp4v mpg4", "mpeg mpg mpe m1v m2v", "qt mov",
"mxu m4u", "asf asx"), mime_type = c("application/x-bytecode.python",
"application/acad", "application/andrew-inset", "application/applixware",
"application/arj", "application/atom+xml", "application/atom+xml",
"application/atomcat+xml", "application/atomsvc+xml", "application/base64",
"application/base64", "application/binhex", "application/binhex4",
"application/book", "application/book", "application/ccxml+xml",
"application/cdf", "application/cdmi-capability", "application/cdmi-container",
"application/cdmi-domain", "application/cdmi-object", "application/cdmi-queue",
"application/clariscad", "application/commonground", "application/cu-seeme",
"application/cu-seeme", "application/davmount+xml", "application/docbook+xml",
"application/drafting", "application/dsptype", "application/dssc+der",
"application/dssc+xml", "application/dxf", "application/ecmascript",
"application/ecmascript", "application/ecmascript", "application/emma+xml",
"application/envoy", "application/epub+zip", "application/excel",
"application/excel", "application/excel", "application/excel",
"application/excel", "application/excel", "application/excel",
"application/excel", "application/excel", "application/excel",
"application/excel", "application/excel", "application/exi",
"application/font-tdpfr", "application/font-woff", "application/fractals",
"application/freeloader", "application/futuresplash", "application/gml+xml",
"application/gnutar", "application/gpx+xml", "application/groupwise",
"application/gxf", "application/hlp", "application/hta", "application/hyperstudio",
"application/i-deas", "application/iges", "application/iges",
"application/inf", "application/inkml+xml", "application/inkml+xml",
"application/internet-property-stream", "application/ipfix",
"application/java", "application/java-archive", "application/java-byte-code",
"application/java-serialized-object", "application/java-vm",
"application/javascript", "application/json", "application/jsonml+json",
"application/lha", "application/lost+xml", "application/lzx",
"application/mac-binary", "application/mac-binhex", "application/mac-binhex40",
"application/mac-compactpro", "application/macbinary", "application/mads+xml",
"application/marc", "application/marcxml+xml", "application/mathematica",
"application/mathematica", "application/mathematica", "application/mathml+xml",
"application/mbedlet", "application/mbox", "application/mcad",
"application/mediaservercontrol+xml", "application/metalink+xml",
"application/metalink4+xml", "application/mets+xml", "application/mime",
"application/mods+xml", "application/mp21", "application/mp21",
"application/mp4", "application/mp4", "application/mp4", "application/msaccess",
"application/msonenote", "application/msonenote", "application/msonenote",
"application/msonenote", "application/mspowerpoint", "application/mspowerpoint",
"application/mspowerpoint", "application/mspowerpoint", "application/msword",
"application/msword", "application/msword", "application/msword",
"application/msword", "application/mswrite", "application/mxf",
"application/netmc", "application/octet-stream", "application/octet-stream",
"application/octet-stream", "application/octet-stream", "application/octet-stream",
"application/octet-stream", "application/octet-stream", "application/octet-stream",
"application/octet-stream", "application/octet-stream", "application/octet-stream",
"application/octet-stream", "application/octet-stream", "application/octet-stream",
"application/octet-stream", "application/octet-stream", "application/octet-stream",
"application/octet-stream", "application/octet-stream", "application/octet-stream",
"application/octet-stream", "application/octet-stream", "application/octet-stream",
"application/octet-stream", "application/octet-stream", "application/octet-stream",
"application/octet-stream", "application/octet-stream", "application/octet-stream",
"application/octet-stream", "application/octet-stream", "application/octet-stream",
"application/octet-stream", "application/octet-stream", "application/octet-stream",
"application/oda", "application/oebps-package+xml", "application/ogg",
"application/ogg", "application/olescript", "application/omdoc+xml",
"application/onenote", "application/onenote", "application/onenote",
"application/onenote", "application/oxps", "application/patch-ops-error+xml",
"application/pdf", "application/pgp-encrypted", "application/pgp-keys",
"application/pgp-signature", "application/pgp-signature", "application/pgp-signature",
"application/pics-rules", "application/pkcs-12", "application/pkcs-crl",
"application/pkcs10", "application/pkcs7-mime", "application/pkcs7-mime",
"application/pkcs7-signature", "application/pkcs8", "application/pkix-attr-cert",
"application/pkix-cert", "application/pkix-cert", "application/pkix-crl",
"application/pkix-pkipath", "application/pkixcmp", "application/plain",
"application/pls+xml", "application/postscript", "application/postscript",
"application/postscript", "application/powerpoint", "application/pro_eng",
"application/pro_eng", "application/prs.cww", "application/pskc+xml",
"application/rar", "application/rdf+xml", "application/reginfo+xml",
"application/relax-ng-compact-syntax", "application/resource-lists+xml",
"application/resource-lists-diff+xml", "application/ringing-tones",
"application/rls-services+xml", "application/rpki-ghostbusters",
"application/rpki-manifest", "application/rpki-roa", "application/rsd+xml",
"application/rss+xml", "application/rss+xml", "application/rtf",
"application/rtf", "application/sbml+xml", "application/scvp-cv-request",
"application/scvp-cv-response", "application/scvp-vp-request",
"application/scvp-vp-response", "application/sdp", "application/sea",
"application/set", "application/set-payment-initiation", "application/set-registration-initiation",
"application/shf+xml", "application/sla", "application/smil",
"application/smil", "application/smil+xml", "application/smil+xml",
"application/solids", "application/sounder", "application/sparql-query",
"application/sparql-results+xml", "application/srgs", "application/srgs+xml",
"application/sru+xml", "application/ssdl+xml", "application/ssml+xml",
"application/step", "application/step", "application/streamingmedia",
"application/tei+xml", "application/tei+xml", "application/thraud+xml",
"application/timestamped-data", "application/toolbook", "application/vda",
"application/vnd.3gpp.pic-bw-large", "application/vnd.3gpp.pic-bw-small",
"application/vnd.3gpp.pic-bw-var", "application/vnd.3gpp2.tcap",
"application/vnd.3m.post-it-notes", "application/vnd.accpac.simply.aso",
"application/vnd.accpac.simply.imp", "application/vnd.acucobol",
"application/vnd.acucorp", "application/vnd.acucorp", "application/vnd.adobe.air-application-installer-package+zip",
"application/vnd.adobe.formscentral.fcdt", "application/vnd.adobe.fxp",
"application/vnd.adobe.fxp", "application/vnd.adobe.xdp+xml",
"application/vnd.adobe.xfdf", "application/vnd.ahead.space",
"application/vnd.airzip.filesecure.azf", "application/vnd.airzip.filesecure.azs",
"application/vnd.amazon.ebook", "application/vnd.americandynamics.acc",
"application/vnd.amiga.ami", "application/vnd.android.package-archive",
"application/vnd.anser-web-certificate-issue-initiation", "application/vnd.anser-web-funds-transfer-initiation",
"application/vnd.antix.game-component", "application/vnd.apple.installer+xml",
"application/vnd.apple.mpegurl", "application/vnd.arastra.swi",
"application/vnd.aristanetworks.swi", "application/vnd.astraea-software.iota",
"application/vnd.audiograph", "application/vnd.blueice.multipass",
"application/vnd.bmi", "application/vnd.businessobjects", "application/vnd.chemdraw+xml",
"application/vnd.chipnuts.karaoke-mmd", "application/vnd.cinderella",
"application/vnd.claymore", "application/vnd.cloanto.rp9", "application/vnd.clonk.c4group",
"application/vnd.clonk.c4group", "application/vnd.clonk.c4group",
"application/vnd.clonk.c4group", "application/vnd.clonk.c4group",
"application/vnd.cluetrust.cartomobile-config", "application/vnd.cluetrust.cartomobile-config-pkg",
"application/vnd.commonspace", "application/vnd.contact.cmsg",
"application/vnd.cosmocaller", "application/vnd.crick.clicker",
"application/vnd.crick.clicker.keyboard", "application/vnd.crick.clicker.palette",
"application/vnd.crick.clicker.template", "application/vnd.crick.clicker.wordbank",
"application/vnd.criticaltools.wbs+xml", "application/vnd.ctc-posml",
"application/vnd.cups-ppd", "application/vnd.curl.car", "application/vnd.curl.pcurl",
"application/vnd.dart", "application/vnd.data-vision.rdz", "application/vnd.dece.data",
"application/vnd.dece.data", "application/vnd.dece.data", "application/vnd.dece.data",
"application/vnd.dece.ttml+xml", "application/vnd.dece.ttml+xml",
"application/vnd.dece.unspecified", "application/vnd.dece.unspecified",
"application/vnd.dece.zip", "application/vnd.dece.zip", "application/vnd.denovo.fcselayout-link",
"application/vnd.dna", "application/vnd.dolby.mlp", "application/vnd.dpgraph",
"application/vnd.dreamfactory", "application/vnd.ds-keypoint",
"application/vnd.dvb.ait", "application/vnd.dvb.service", "application/vnd.dynageo",
"application/vnd.ecowin.chart", "application/vnd.enliven", "application/vnd.epson.esf",
"application/vnd.epson.msf", "application/vnd.epson.quickanime",
"application/vnd.epson.salt", "application/vnd.epson.ssf", "application/vnd.eszigno3+xml",
"application/vnd.eszigno3+xml", "application/vnd.ezpix-album",
"application/vnd.ezpix-package", "application/vnd.fdf", "application/vnd.fdsn.mseed",
"application/vnd.fdsn.seed", "application/vnd.fdsn.seed", "application/vnd.flographit",
"application/vnd.fluxtime.clip", "application/vnd.framemaker",
"application/vnd.framemaker", "application/vnd.framemaker", "application/vnd.framemaker",
"application/vnd.frogans.fnc", "application/vnd.frogans.ltf",
"application/vnd.fsc.weblaunch", "application/vnd.fujitsu.oasys",
"application/vnd.fujitsu.oasys2", "application/vnd.fujitsu.oasys3",
"application/vnd.fujitsu.oasysgp", "application/vnd.fujitsu.oasysprs",
"application/vnd.fujixerox.ddd", "application/vnd.fujixerox.docuworks",
"application/vnd.fujixerox.docuworks.binder", "application/vnd.fuzzysheet",
"application/vnd.genomatix.tuxedo", "application/vnd.geogebra.file",
"application/vnd.geogebra.tool", "application/vnd.geometry-explorer",
"application/vnd.geometry-explorer", "application/vnd.geonext",
"application/vnd.geoplan", "application/vnd.geospace", "application/vnd.gmx",
"application/vnd.google-earth.kml+xml", "application/vnd.google-earth.kmz",
"application/vnd.grafeq", "application/vnd.grafeq", "application/vnd.groove-account",
"application/vnd.groove-help", "application/vnd.groove-identity-message",
"application/vnd.groove-injector", "application/vnd.groove-tool-message",
"application/vnd.groove-tool-template", "application/vnd.groove-vcard",
"application/vnd.hal+xml", "application/vnd.handheld-entertainment+xml",
"application/vnd.hbci", "application/vnd.hhe.lesson-player",
"application/vnd.hp-hpgl", "application/vnd.hp-hpgl", "application/vnd.hp-hpgl",
"application/vnd.hp-hpid", "application/vnd.hp-hps", "application/vnd.hp-jlyt",
"application/vnd.hp-pcl", "application/vnd.hp-pclxl", "application/vnd.hydrostatix.sof-data",
"application/vnd.hzn-3d-crossword", "application/vnd.ibm.minipay",
"application/vnd.ibm.modcap", "application/vnd.ibm.modcap", "application/vnd.ibm.modcap",
"application/vnd.ibm.rights-management", "application/vnd.ibm.secure-container",
"application/vnd.iccprofile", "application/vnd.iccprofile", "application/vnd.igloader",
"application/vnd.immervision-ivp", "application/vnd.immervision-ivu",
"application/vnd.insors.igm", "application/vnd.intercon.formnet",
"application/vnd.intercon.formnet", "application/vnd.intergeo",
"application/vnd.intu.qbo", "application/vnd.intu.qfx", "application/vnd.ipunplugged.rcprofile",
"application/vnd.irepository.package+xml", "application/vnd.is-xpr",
"application/vnd.isac.fcs", "application/vnd.jam", "application/vnd.jcp.javame.midlet-rms",
"application/vnd.jisp", "application/vnd.joost.joda-archive",
"application/vnd.kahootz", "application/vnd.kahootz", "application/vnd.kde.karbon",
"application/vnd.kde.kchart", "application/vnd.kde.kformula",
"application/vnd.kde.kivio", "application/vnd.kde.kontour", "application/vnd.kde.kpresenter",
"application/vnd.kde.kpresenter", "application/vnd.kde.kspread",
"application/vnd.kde.kword", "application/vnd.kde.kword", "application/vnd.kenameaapp",
"application/vnd.kidspiration", "application/vnd.kinar", "application/vnd.kinar",
"application/vnd.koan", "application/vnd.koan", "application/vnd.koan",
"application/vnd.koan", "application/vnd.kodak-descriptor", "application/vnd.las.las+xml",
"application/vnd.llamagraphics.life-balance.desktop", "application/vnd.llamagraphics.life-balance.exchange+xml",
"application/vnd.lotus-1-2-3", "application/vnd.lotus-approach",
"application/vnd.lotus-freelance", "application/vnd.lotus-notes",
"application/vnd.lotus-organizer", "application/vnd.lotus-screencam",
"application/vnd.lotus-wordpro", "application/vnd.macports.portpkg",
"application/vnd.mcd", "application/vnd.medcalcdata", "application/vnd.mediastation.cdkey",
"application/vnd.mfer", "application/vnd.mfmp", "application/vnd.micrografx.flo",
"application/vnd.micrografx.igx", "application/vnd.mif", "application/vnd.mobius.daf",
"application/vnd.mobius.dis", "application/vnd.mobius.mbk", "application/vnd.mobius.mqy",
"application/vnd.mobius.msl", "application/vnd.mobius.plc", "application/vnd.mobius.txf",
"application/vnd.mophun.application", "application/vnd.mophun.certificate",
"application/vnd.mozilla.xul+xml", "application/vnd.ms-artgalry",
"application/vnd.ms-cab-compressed", "application/vnd.ms-excel",
"application/vnd.ms-excel", "application/vnd.ms-excel", "application/vnd.ms-excel",
"application/vnd.ms-excel", "application/vnd.ms-excel", "application/vnd.ms-excel",
"application/vnd.ms-excel", "application/vnd.ms-excel.addin.macroEnabled.12",
"application/vnd.ms-excel.addin.macroenabled.12", "application/vnd.ms-excel.sheet.binary.macroEnabled.12",
"application/vnd.ms-excel.sheet.binary.macroenabled.12", "application/vnd.ms-excel.sheet.macroEnabled.12",
"application/vnd.ms-excel.sheet.macroenabled.12", "application/vnd.ms-excel.template.macroEnabled.12",
"application/vnd.ms-excel.template.macroenabled.12", "application/vnd.ms-fontobject",
"application/vnd.ms-htmlhelp", "application/vnd.ms-ims", "application/vnd.ms-lrm",
"application/vnd.ms-officetheme", "application/vnd.ms-outlook",
"application/vnd.ms-pki.certstore", "application/vnd.ms-pki.pko",
"application/vnd.ms-pki.seccat", "application/vnd.ms-pki.stl",
"application/vnd.ms-pkicertstore", "application/vnd.ms-pkiseccat",
"application/vnd.ms-pkistl", "application/vnd.ms-powerpoint",
"application/vnd.ms-powerpoint", "application/vnd.ms-powerpoint",
"application/vnd.ms-powerpoint", "application/vnd.ms-powerpoint",
"application/vnd.ms-powerpoint.addin.macroEnabled.12", "application/vnd.ms-powerpoint.addin.macroenabled.12",
"application/vnd.ms-powerpoint.presentation.macroEnabled.12",
"application/vnd.ms-powerpoint.presentation.macroEnabled.12",
"application/vnd.ms-powerpoint.presentation.macroenabled.12",
"application/vnd.ms-powerpoint.presentation.macroenabled.12",
"application/vnd.ms-powerpoint.slide.macroEnabled.12", "application/vnd.ms-powerpoint.slide.macroenabled.12",
"application/vnd.ms-powerpoint.slideshow.macroEnabled.12", "application/vnd.ms-powerpoint.slideshow.macroenabled.12",
"application/vnd.ms-powerpoint.template.macroEnabled.12", "application/vnd.ms-powerpoint.template.macroenabled.12",
"application/vnd.ms-project", "application/vnd.ms-project", "application/vnd.ms-word.document.macroEnabled.12",
"application/vnd.ms-word.document.macroenabled.12", "application/vnd.ms-word.template.macroEnabled.12",
"application/vnd.ms-word.template.macroenabled.12", "application/vnd.ms-works",
"application/vnd.ms-works", "application/vnd.ms-works", "application/vnd.ms-works",
"application/vnd.ms-wpl", "application/vnd.ms-xpsdocument", "application/vnd.mseq",
"application/vnd.musician", "application/vnd.muvee.style", "application/vnd.mynfc",
"application/vnd.neurolanguage.nlu", "application/vnd.nitf",
"application/vnd.nitf", "application/vnd.noblenet-directory",
"application/vnd.noblenet-sealer", "application/vnd.noblenet-web",
"application/vnd.nokia.configuration-message", "application/vnd.nokia.n-gage.data",
"application/vnd.nokia.n-gage.symbian.install", "application/vnd.nokia.radio-preset",
"application/vnd.nokia.radio-presets", "application/vnd.nokia.ringing-tone",
"application/vnd.novadigm.EDM", "application/vnd.novadigm.EDX",
"application/vnd.novadigm.EXT", "application/vnd.novadigm.edm",
"application/vnd.novadigm.edx", "application/vnd.novadigm.ext",
"application/vnd.oasis.opendocument.chart", "application/vnd.oasis.opendocument.chart-template",
"application/vnd.oasis.opendocument.database", "application/vnd.oasis.opendocument.formula",
"application/vnd.oasis.opendocument.formula-template", "application/vnd.oasis.opendocument.graphics",
"application/vnd.oasis.opendocument.graphics-template", "application/vnd.oasis.opendocument.image",
"application/vnd.oasis.opendocument.image-template", "application/vnd.oasis.opendocument.presentation",
"application/vnd.oasis.opendocument.presentation-template", "application/vnd.oasis.opendocument.spreadsheet",
"application/vnd.oasis.opendocument.spreadsheet-template", "application/vnd.oasis.opendocument.text",
"application/vnd.oasis.opendocument.text-master", "application/vnd.oasis.opendocument.text-master",
"application/vnd.oasis.opendocument.text-template", "application/vnd.oasis.opendocument.text-web",
"application/vnd.olpc-sugar", "application/vnd.oma.dd2+xml",
"application/vnd.openofficeorg.extension", "application/vnd.openxmlformats-officedocument.presentationml.presentation",
"application/vnd.openxmlformats-officedocument.presentationml.slide",
"application/vnd.openxmlformats-officedocument.presentationml.slideshow",
"application/vnd.openxmlformats-officedocument.presentationml.template",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"application/vnd.openxmlformats-officedocument.spreadsheetml.template",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"application/vnd.openxmlformats-officedocument.wordprocessingml.template",
"application/vnd.osgeo.mapguide.package", "application/vnd.osgi.dp",
"application/vnd.osgi.subsystem", "application/vnd.palm", "application/vnd.palm",
"application/vnd.palm", "application/vnd.pawaafile", "application/vnd.pg.format",
"application/vnd.pg.osasli", "application/vnd.picsel", "application/vnd.pmi.widget",
"application/vnd.pocketlearn", "application/vnd.powerbuilder6",
"application/vnd.previewsystems.box", "application/vnd.proteus.magazine",
"application/vnd.publishare-delta-tree", "application/vnd.pvi.ptid1",
"application/vnd.quark.quarkxpress", "application/vnd.quark.quarkxpress",
"application/vnd.quark.quarkxpress", "application/vnd.quark.quarkxpress",
"application/vnd.quark.quarkxpress", "application/vnd.quark.quarkxpress",
"application/vnd.realvnc.bed", "application/vnd.recordare.musicxml",
"application/vnd.recordare.musicxml+xml", "application/vnd.rig.cryptonote",
"application/vnd.rim.cod", "application/vnd.rn-realmedia", "application/vnd.rn-realmedia-vbr",
"application/vnd.rn-realplayer", "application/vnd.route66.link66+xml",
"application/vnd.sailingtracker.track", "application/vnd.seemail",
"application/vnd.sema", "application/vnd.semd", "application/vnd.semf",
"application/vnd.shana.informed.formdata", "application/vnd.shana.informed.formtemplate",
"application/vnd.shana.informed.interchange", "application/vnd.shana.informed.package",
"application/vnd.simtech-mindmapper", "application/vnd.simtech-mindmapper",
"application/vnd.smaf", "application/vnd.smart.teacher", "application/vnd.solent.sdkm+xml",
"application/vnd.solent.sdkm+xml", "application/vnd.spotfire.dxp",
"application/vnd.spotfire.sfs", "application/vnd.stardivision.calc",
"application/vnd.stardivision.draw", "application/vnd.stardivision.impress",
"application/vnd.stardivision.impress", "application/vnd.stardivision.math",
"application/vnd.stardivision.writer", "application/vnd.stardivision.writer",
"application/vnd.stardivision.writer-global", "application/vnd.stepmania.package",
"application/vnd.stepmania.stepchart", "application/vnd.sun.xml.calc",
"application/vnd.sun.xml.calc.template", "application/vnd.sun.xml.draw",
"application/vnd.sun.xml.draw.template", "application/vnd.sun.xml.impress",
"application/vnd.sun.xml.impress.template", "application/vnd.sun.xml.math",
"application/vnd.sun.xml.writer", "application/vnd.sun.xml.writer.global",
"application/vnd.sun.xml.writer.template", "application/vnd.sus-calendar",
"application/vnd.sus-calendar", "application/vnd.svd", "application/vnd.symbian.install",
"application/vnd.symbian.install", "application/vnd.syncml+xml",
"application/vnd.syncml.dm+wbxml", "application/vnd.syncml.dm+xml",
"application/vnd.tao.intent-module-archive", "application/vnd.tcpdump.pcap",
"application/vnd.tcpdump.pcap", "application/vnd.tcpdump.pcap",
"application/vnd.tmobile-livetv", "application/vnd.trid.tpt",
"application/vnd.triscape.mxs", "application/vnd.trueapp", "application/vnd.ufdl",
"application/vnd.ufdl", "application/vnd.uiq.theme", "application/vnd.umajin",
"application/vnd.unity", "application/vnd.uoml+xml", "application/vnd.vcx",
"application/vnd.visio", "application/vnd.visio", "application/vnd.visio",
"application/vnd.visio", "application/vnd.visionary", "application/vnd.vsf",
"application/vnd.wap.sic", "application/vnd.wap.slc", "application/vnd.wap.wbxml",
"application/vnd.wap.wmlc", "application/vnd.wap.wmlscriptc",
"application/vnd.webturbo", "application/vnd.wolfram.player",
"application/vnd.wordperfect", "application/vnd.wqd", "application/vnd.wt.stf",
"application/vnd.xara", "application/vnd.xara", "application/vnd.xfdl",
"application/vnd.yamaha.hv-dic", "application/vnd.yamaha.hv-script",
"application/vnd.yamaha.hv-voice", "application/vnd.yamaha.openscoreformat",
"application/vnd.yamaha.openscoreformat.osfpvg+xml", "application/vnd.yamaha.smaf-audio",
"application/vnd.yamaha.smaf-phrase", "application/vnd.yellowriver-custom-menu",
"application/vnd.zul", "application/vnd.zul", "application/vnd.zzazz.deck+xml",
"application/vocaltec-media-desc", "application/vocaltec-media-file",
"application/voicexml+xml", "application/widget", "application/winhlp",
"application/wordperfect", "application/wordperfect", "application/wordperfect",
"application/wordperfect", "application/wordperfect5.1", "application/wordperfect6.0",
"application/wordperfect6.0", "application/wordperfect6.1", "application/wsdl+xml",
"application/wspolicy+xml", "application/x-123", "application/x-123",
"application/x-7z-compressed", "application/x-abiword", "application/x-ace-compressed",
"application/x-aim", "application/x-apple-diskimage", "application/x-authorware-bin",
"application/x-authorware-bin", "application/x-authorware-bin",
"application/x-authorware-bin", "application/x-authorware-map",
"application/x-authorware-seg", "application/x-bcpio", "application/x-binary",
"application/x-binhex40", "application/x-bittorrent", "application/x-blorb",
"application/x-blorb", "application/x-bsh", "application/x-bsh",
"application/x-bsh", "application/x-bytecode.elisp", "application/x-bytecode.elisp(compiledelisp)",
"application/x-bzip", "application/x-bzip2", "application/x-bzip2",
"application/x-cbr", "application/x-cbr", "application/x-cbr",
"application/x-cbr", "application/x-cbr", "application/x-cdf",
"application/x-cdlink", "application/x-cfs-compressed", "application/x-chat",
"application/x-chat", "application/x-chess-pgn", "application/x-chm",
"application/x-chrome-extension", "application/x-cmu-raster",
"application/x-cocoa", "application/x-compactpro", "application/x-compress",
"application/x-compressed", "application/x-compressed", "application/x-compressed",
"application/x-compressed", "application/x-conference", "application/x-cpio",
"application/x-cpt", "application/x-csh", "application/x-debian-package",
"application/x-debian-package", "application/x-deepv", "application/x-dgc-compressed",
"application/x-director", "application/x-director", "application/x-director",
"application/x-director", "application/x-director", "application/x-director",
"application/x-director", "application/x-director", "application/x-director",
"application/x-dms", "application/x-doom", "application/x-dtbncx+xml",
"application/x-dtbook+xml", "application/x-dtbresource+xml",
"application/x-dvi", "application/x-elc", "application/x-envoy",
"application/x-envoy", "application/x-esrehber", "application/x-eva",
"application/x-excel", "application/x-excel", "application/x-excel",
"application/x-excel", "application/x-excel", "application/x-excel",
"application/x-excel", "application/x-excel", "application/x-excel",
"application/x-excel", "application/x-excel", "application/x-flac",
"application/x-font", "application/x-font", "application/x-font",
"application/x-font", "application/x-font", "application/x-font-bdf",
"application/x-font-ghostscript", "application/x-font-linux-psf",
"application/x-font-otf", "application/x-font-pcf", "application/x-font-snf",
"application/x-font-ttf", "application/x-font-ttf", "application/x-font-type1",
"application/x-font-type1", "application/x-font-type1", "application/x-font-type1",
"application/x-font-woff", "application/x-frame", "application/x-freearc",
"application/x-freelance", "application/x-futuresplash", "application/x-gca-compressed",
"application/x-glulx", "application/x-gnumeric", "application/x-go-sgf",
"application/x-gramps-xml", "application/x-graphing-calculator",
"application/x-gsp", "application/x-gss", "application/x-gtar",
"application/x-gtar", "application/x-gtar", "application/x-gzip",
"application/x-gzip", "application/x-gzip", "application/x-hdf",
"application/x-helpfile", "application/x-helpfile", "application/x-httpd-imap",
"application/x-httpd-php", "application/x-httpd-php", "application/x-httpd-php",
"application/x-httpd-php-source", "application/x-httpd-php3",
"application/x-httpd-php3-preprocessed", "application/x-httpd-php4",
"application/x-ica", "application/x-ima", "application/x-install-instructions",
"application/x-internet-signup", "application/x-internet-signup",
"application/x-internett-signup", "application/x-inventor", "application/x-ip2",
"application/x-iphone", "application/x-iso9660-image", "application/x-java-archive",
"application/x-java-class", "application/x-java-commerce", "application/x-java-jnlp-file",
"application/x-java-serialized-object", "application/x-java-vm",
"application/x-javascript", "application/x-kchart", "application/x-killustrator",
"application/x-koan", "application/x-koan", "application/x-koan",
"application/x-koan", "application/x-kpresenter", "application/x-kpresenter",
"application/x-ksh", "application/x-kspread", "application/x-kword",
"application/x-kword", "application/x-latex", "application/x-latex",
"application/x-lha", "application/x-lisp", "application/x-livescreen",
"application/x-lotus", "application/x-lotusscreencam", "application/x-lua-bytecode",
"application/x-lzh", "application/x-lzh-compressed", "application/x-lzh-compressed",
"application/x-lzx", "application/x-mac-binhex40", "application/x-macbinary",
"application/x-magic-cap-package-1.0", "application/x-maker",
"application/x-maker", "application/x-maker", "application/x-maker",
"application/x-maker", "application/x-maker", "application/x-maker",
"application/x-mathcad", "application/x-meme", "application/x-midi",
"application/x-midi", "application/x-mie", "application/x-mif",
"application/x-mix-transfer", "application/x-mobipocket-ebook",
"application/x-mobipocket-ebook", "application/x-mpegURL", "application/x-mplayer2",
"application/x-ms-application", "application/x-ms-shortcut",
"application/x-ms-wmd", "application/x-ms-wmz", "application/x-ms-xbap",
"application/x-msaccess", "application/x-msbinder", "application/x-mscardfile",
"application/x-msclip", "application/x-msdos-program", "application/x-msdos-program",
"application/x-msdos-program", "application/x-msdos-program",
"application/x-msdownload", "application/x-msdownload", "application/x-msdownload",
"application/x-msdownload", "application/x-msdownload", "application/x-msexcel",
"application/x-msexcel", "application/x-msexcel", "application/x-msi",
"application/x-msmediaview", "application/x-msmediaview", "application/x-msmediaview",
"application/x-msmetafile", "application/x-msmetafile", "application/x-msmetafile",
"application/x-msmetafile", "application/x-msmoney", "application/x-mspowerpoint",
"application/x-mspublisher", "application/x-msschedule", "application/x-msterminal",
"application/x-mswrite", "application/x-navi-animation", "application/x-navidoc",
"application/x-navimap", "application/x-navistyle", "application/x-netcdf",
"application/x-netcdf", "application/x-newton-compatible-pkg",
"application/x-nokia-9000-communicator-add-on-software", "application/x-ns-proxy-autoconfig",
"application/x-nwc", "application/x-nzb", "application/x-object",
"application/x-omc", "application/x-omcdatamaker", "application/x-omcregerator",
"application/x-oz-application", "application/x-pagemaker", "application/x-pagemaker",
"application/x-pcl", "application/x-perfmon", "application/x-perfmon",
"application/x-perfmon", "application/x-perfmon", "application/x-perfmon",
"application/x-pixclscript", "application/x-pkcs10", "application/x-pkcs12",
"application/x-pkcs12", "application/x-pkcs7-certificates", "application/x-pkcs7-certificates",
"application/x-pkcs7-certreqresp", "application/x-pkcs7-crl",
"application/x-pkcs7-mime", "application/x-pkcs7-mime", "application/x-pkcs7-signature",
"application/x-pkcs7-signature", "application/x-pointplus", "application/x-portable-anymap",
"application/x-project", "application/x-project", "application/x-project",
"application/x-project", "application/x-python-code", "application/x-python-code",
"application/x-qpro", "application/x-quicktimeplayer", "application/x-rar-compressed",
"application/x-redhat-package-manager", "application/x-research-info-systems",
"application/x-rpm", "application/x-rtf", "application/x-sdp",
"application/x-sea", "application/x-seelogo", "application/x-sh",
"application/x-shar", "application/x-shar", "application/x-shockwave-flash",
"application/x-shockwave-flash", "application/x-silverlight-app",
"application/x-sit", "application/x-sprite", "application/x-sprite",
"application/x-sql", "application/x-stuffit", "application/x-stuffitx",
"application/x-subrip", "application/x-sv4cpio", "application/x-sv4crc",
"application/x-t3vm-image", "application/x-tads", "application/x-tar",
"application/x-tbook", "application/x-tbook", "application/x-tcl",
"application/x-tex", "application/x-tex-gf", "application/x-tex-pk",
"application/x-tex-tfm", "application/x-texinfo", "application/x-texinfo",
"application/x-tgif", "application/x-trash", "application/x-trash",
"application/x-trash", "application/x-trash", "application/x-trash",
"application/x-troff", "application/x-troff", "application/x-troff",
"application/x-troff-man", "application/x-troff-me", "application/x-troff-ms",
"application/x-troff-msvideo", "application/x-ustar", "application/x-visio",
"application/x-visio", "application/x-visio", "application/x-vnd.audioexplosion.mzz",
"application/x-vnd.ls-xpix", "application/x-vrml", "application/x-wais-source",
"application/x-wais-source", "application/x-web-app-manifest+json",
"application/x-wingz", "application/x-winhelp", "application/x-wintalk",
"application/x-world", "application/x-world", "application/x-wpwin",
"application/x-wri", "application/x-x509-ca-cert", "application/x-x509-ca-cert",
"application/x-x509-ca-cert", "application/x-x509-user-cert",
"application/x-xcf", "application/x-xfig", "application/x-xliff+xml",
"application/x-xpinstall", "application/x-xz", "application/x-zip-compressed",
"application/x-zmachine", "application/x-zmachine", "application/x-zmachine",
"application/x-zmachine", "application/x-zmachine", "application/x-zmachine",
"application/x-zmachine", "application/x-zmachine", "application/xaml+xml",
"application/xcap-diff+xml", "application/xenc+xml", "application/xhtml+xml",
"application/xhtml+xml", "application/xml", "application/xml",
"application/xml", "application/xml-dtd", "application/xop+xml",
"application/xproc+xml", "application/xslt+xml", "application/xspf+xml",
"application/xv+xml", "application/xv+xml", "application/xv+xml",
"application/xv+xml", "application/yang", "application/yin+xml",
"application/ynd.ms-pkipko", "application/zip", "audio/adpcm",
"audio/aiff", "audio/aiff", "audio/aiff", "audio/basic", "audio/basic",
"audio/flac", "audio/it", "audio/make", "audio/make", "audio/make",
"audio/make.my.funk", "audio/mid", "audio/mid", "audio/midi",
"audio/midi", "audio/midi", "audio/midi", "audio/mod", "audio/mp4",
"audio/mp4", "audio/mpeg", "audio/mpeg", "audio/mpeg", "audio/mpeg",
"audio/mpeg", "audio/mpeg", "audio/mpeg", "audio/mpeg", "audio/mpeg",
"audio/mpeg", "audio/mpeg3", "audio/mpegurl", "audio/nspaudio",
"audio/nspaudio", "audio/ogg", "audio/ogg", "audio/ogg", "audio/prs.sid",
"audio/s3m", "audio/silk", "audio/tsp-audio", "audio/tsplayer",
"audio/vnd.dece.audio", "audio/vnd.dece.audio", "audio/vnd.digital-winds",
"audio/vnd.dra", "audio/vnd.dts", "audio/vnd.dts.hd", "audio/vnd.lucent.voice",
"audio/vnd.ms-playready.media.pya", "audio/vnd.nuera.ecelp4800",
"audio/vnd.nuera.ecelp7470", "audio/vnd.nuera.ecelp9600", "audio/vnd.qcelp",
"audio/vnd.rip", "audio/voc", "audio/voxware", "audio/wav", "audio/webm",
"audio/x-aac", "audio/x-adpcm", "audio/x-aiff", "audio/x-aiff",
"audio/x-aiff", "audio/x-au", "audio/x-caf", "audio/x-flac",
"audio/x-gsm", "audio/x-gsm", "audio/x-jam", "audio/x-liveaudio",
"audio/x-matroska", "audio/x-mid", "audio/x-mid", "audio/x-midi",
"audio/x-midi", "audio/x-mod", "audio/x-mpeg", "audio/x-mpeg-3",
"audio/x-mpegurl", "audio/x-mpequrl", "audio/x-ms-wax", "audio/x-ms-wma",
"audio/x-nspaudio", "audio/x-nspaudio", "audio/x-pn-realaudio",
"audio/x-pn-realaudio", "audio/x-pn-realaudio", "audio/x-pn-realaudio",
"audio/x-pn-realaudio", "audio/x-pn-realaudio-plugin", "audio/x-pn-realaudio-plugin",
"audio/x-pn-realaudio-plugin", "audio/x-psid", "audio/x-realaudio",
"audio/x-scpls", "audio/x-sd2", "audio/x-twinvq", "audio/x-twinvq-plugin",
"audio/x-twinvq-plugin", "audio/x-vnd.audioexplosion.mjuicemediafile",
"audio/x-voc", "audio/x-wav", "audio/xm", "chemical/x-cdx", "chemical/x-cif",
"chemical/x-cmdf", "chemical/x-cml", "chemical/x-csml", "chemical/x-pdb",
"chemical/x-pdb", "chemical/x-xyz", "drawing/x-dwf", "drawing/x-dwf(old)",
"font/opentype", "i-world/i-vrml", "image/bmp", "image/bmp",
"image/cgm", "image/cis-cod", "image/cmu-raster", "image/cmu-raster",
"image/fif", "image/florian", "image/florian", "image/g3fax",
"image/gif", "image/ief", "image/ief", "image/jpeg", "image/jpeg",
"image/jpeg", "image/jpeg", "image/jpeg", "image/jutvision",
"image/ktx", "image/naplps", "image/naplps", "image/pcx", "image/pict",
"image/pict", "image/pipeg", "image/pjpeg", "image/pjpeg", "image/pjpeg",
"image/pjpeg", "image/png", "image/png", "image/prs.btif", "image/sgi",
"image/svg+xml", "image/svg+xml", "image/tiff", "image/tiff",
"image/vasa", "image/vnd.adobe.photoshop", "image/vnd.dece.graphic",
"image/vnd.dece.graphic", "image/vnd.dece.graphic", "image/vnd.dece.graphic",
"image/vnd.djvu", "image/vnd.djvu", "image/vnd.dvb.subtitle",
"image/vnd.dwg", "image/vnd.dwg", "image/vnd.dwg", "image/vnd.dxf",
"image/vnd.fastbidsheet", "image/vnd.fpx", "image/vnd.fpx", "image/vnd.fst",
"image/vnd.fujixerox.edmics-mmr", "image/vnd.fujixerox.edmics-rlc",
"image/vnd.ms-modi", "image/vnd.ms-photo", "image/vnd.net-fpx",
"image/vnd.net-fpx", "image/vnd.rn-realflash", "image/vnd.rn-realpix",
"image/vnd.wap.wbmp", "image/vnd.xiff", "image/webp", "image/x-3ds",
"image/x-cmu-rast", "image/x-cmu-raster", "image/x-cmx", "image/x-coreldraw",
"image/x-coreldrawpattern", "image/x-coreldrawtemplate", "image/x-corelphotopaint",
"image/x-dwg", "image/x-dwg", "image/x-dwg", "image/x-freehand",
"image/x-freehand", "image/x-freehand", "image/x-freehand", "image/x-freehand",
"image/x-icon", "image/x-jg", "image/x-jng", "image/x-jps", "image/x-mrsid-image",
"image/x-ms-bmp", "image/x-niff", "image/x-niff", "image/x-pcx",
"image/x-photoshop", "image/x-pict", "image/x-pict", "image/x-portable-anymap",
"image/x-portable-bitmap", "image/x-portable-graymap", "image/x-portable-greymap",
"image/x-portable-pixmap", "image/x-quicktime", "image/x-quicktime",
"image/x-quicktime", "image/x-rgb", "image/x-tga", "image/x-tiff",
"image/x-tiff", "image/x-windows-bmp", "image/x-xbitmap", "image/x-xbitmap",
"image/x-xbm", "image/x-xpixmap", "image/x-xpixmap", "image/x-xwd",
"image/x-xwindowdump", "image/xbm", "image/xpm", "message/rfc822",
"message/rfc822", "message/rfc822", "message/rfc822", "message/rfc822",
"model/iges", "model/iges", "model/mesh", "model/mesh", "model/mesh",
"model/vnd.collada+xml", "model/vnd.dwf", "model/vnd.gdl", "model/vnd.gtw",
"model/vnd.mts", "model/vnd.vtu", "model/vrml", "model/vrml",
"model/vrml", "model/x-pov", "model/x3d+binary", "model/x3d+binary",
"model/x3d+vrml", "model/x3d+vrml", "model/x3d+xml", "model/x3d+xml",
"multipart/x-gzip", "multipart/x-ustar", "multipart/x-zip", "music/crescendo",
"music/crescendo", "music/x-karaoke", "paleovu/x-pv", "text/asp",
"text/cache-manifest", "text/cache-manifest", "text/calendar",
"text/calendar", "text/calendar", "text/comma-separated-values",
"text/css", "text/csv", "text/ecmascript", "text/event-stream",
"text/h323", "text/html", "text/html", "text/html", "text/html",
"text/html", "text/html", "text/html", "text/iuls", "text/javascript",
"text/mathml", "text/mcf", "text/n3", "text/pascal", "text/plain",
"text/plain", "text/plain", "text/plain", "text/plain", "text/plain",
"text/plain", "text/plain", "text/plain", "text/plain", "text/plain",
"text/plain", "text/plain", "text/plain", "text/plain", "text/plain",
"text/plain", "text/plain", "text/plain", "text/plain", "text/plain",
"text/plain", "text/plain", "text/plain", "text/plain", "text/plain",
"text/plain", "text/plain", "text/plain", "text/plain", "text/plain",
"text/plain", "text/plain-bas", "text/prs.lines.tag", "text/richtext",
"text/richtext", "text/richtext", "text/rtf", "text/scriplet",
"text/scriptlet", "text/scriptlet", "text/sgml", "text/sgml",
"text/tab-separated-values", "text/texmacs", "text/texmacs",
"text/troff", "text/troff", "text/troff", "text/troff", "text/troff",
"text/troff", "text/turtle", "text/uri-list", "text/uri-list",
"text/uri-list", "text/uri-list", "text/uri-list", "text/vcard",
"text/vnd.abc", "text/vnd.curl", "text/vnd.curl.dcurl", "text/vnd.curl.mcurl",
"text/vnd.curl.scurl", "text/vnd.dvb.subtitle", "text/vnd.fly",
"text/vnd.fmi.flexstor", "text/vnd.graphviz", "text/vnd.in3d.3dml",
"text/vnd.in3d.spot", "text/vnd.rn-realtext", "text/vnd.sun.j2me.app-descriptor",
"text/vnd.wap.si", "text/vnd.wap.sl", "text/vnd.wap.wml", "text/vnd.wap.wmlscript",
"text/vtt", "text/webviewhtml", "text/x-asm", "text/x-asm", "text/x-audiosoft-intra",
"text/x-c", "text/x-c", "text/x-c", "text/x-c", "text/x-c", "text/x-c",
"text/x-c", "text/x-c++hdr", "text/x-c++hdr", "text/x-c++hdr",
"text/x-c++hdr", "text/x-c++src", "text/x-c++src", "text/x-c++src",
"text/x-c++src", "text/x-chdr", "text/x-component", "text/x-csh",
"text/x-csrc", "text/x-fortran", "text/x-fortran", "text/x-fortran",
"text/x-fortran", "text/x-h", "text/x-h", "text/x-java", "text/x-java-source",
"text/x-java-source", "text/x-la-asf", "text/x-lua", "text/x-m",
"text/x-markdown", "text/x-markdown", "text/x-markdown", "text/x-moc",
"text/x-nfo", "text/x-opml", "text/x-pascal", "text/x-pascal",
"text/x-pcs-gcd", "text/x-perl", "text/x-perl", "text/x-python",
"text/x-script", "text/x-script.csh", "text/x-script.elisp",
"text/x-script.guile", "text/x-script.ksh", "text/x-script.lisp",
"text/x-script.perl", "text/x-script.perl-module", "text/x-script.phyton",
"text/x-script.rexx", "text/x-script.scheme", "text/x-script.sh",
"text/x-script.tcl", "text/x-script.tcsh", "text/x-script.zsh",
"text/x-server-parsed-html", "text/x-server-parsed-html", "text/x-setext",
"text/x-sfv", "text/x-sgml", "text/x-sgml", "text/x-sh", "text/x-speech",
"text/x-speech", "text/x-tcl", "text/x-tcl", "text/x-tex", "text/x-tex",
"text/x-tex", "text/x-tex", "text/x-uil", "text/x-uuencode",
"text/x-uuencode", "text/x-vcalendar", "text/x-vcard", "text/xml",
"video/3gpp", "video/3gpp2", "video/MP2T", "video/animaflex",
"video/avi", "video/avs-video", "video/dl", "video/flc", "video/flc",
"video/fli", "video/fli", "video/gl", "video/h261", "video/h263",
"video/h264", "video/jpeg", "video/jpm", "video/jpm", "video/mj2",
"video/mj2", "video/mp4", "video/mp4", "video/mp4", "video/mpeg",
"video/mpeg", "video/mpeg", "video/mpeg", "video/mpeg", "video/mpeg",
"video/mpeg", "video/mpeg", "video/mpeg", "video/msvideo", "video/ogg",
"video/quicktime", "video/quicktime", "video/quicktime", "video/vdo",
"video/vivo", "video/vivo", "video/vnd.dece.hd", "video/vnd.dece.hd",
"video/vnd.dece.mobile", "video/vnd.dece.mobile", "video/vnd.dece.pd",
"video/vnd.dece.pd", "video/vnd.dece.sd", "video/vnd.dece.sd",
"video/vnd.dece.video", "video/vnd.dece.video", "video/vnd.dvb.file",
"video/vnd.fvt", "video/vnd.mpegurl", "video/vnd.mpegurl", "video/vnd.ms-playready.media.pyv",
"video/vnd.rn-realvideo", "video/vnd.uvvu.mp4", "video/vnd.uvvu.mp4",
"video/vnd.vivo", "video/vnd.vivo", "video/vosaic", "video/webm",
"video/x-amt-demorun", "video/x-amt-showrun", "video/x-atomic3d-feature",
"video/x-dl", "video/x-dv", "video/x-dv", "video/x-f4v", "video/x-fli",
"video/x-flv", "video/x-gl", "video/x-isvideo", "video/x-la-asf",
"video/x-la-asf", "video/x-m4v", "video/x-matroska", "video/x-matroska",
"video/x-matroska", "video/x-mng", "video/x-motion-jpeg", "video/x-mpeg",
"video/x-mpeg", "video/x-mpeq2a", "video/x-ms-asf", "video/x-ms-asf",
"video/x-ms-asf", "video/x-ms-asf-plugin", "video/x-ms-vob",
"video/x-ms-wm", "video/x-ms-wmv", "video/x-ms-wmx", "video/x-ms-wvx",
"video/x-msvideo", "video/x-qtc", "video/x-scm", "video/x-sgi-movie",
"video/x-sgi-movie", "video/x-smv", "windows/metafile", "www/mime",
"x-conference/x-cooltalk", "x-music/x-midi", "x-music/x-midi",
"x-world/x-3dmf", "x-world/x-3dmf", "x-world/x-3dmf", "x-world/x-3dmf",
"x-world/x-svr", "x-world/x-vrml", "x-world/x-vrml", "x-world/x-vrml",
"x-world/x-vrml", "x-world/x-vrml", "x-world/x-vrml", "x-world/x-vrml",
"x-world/x-vrt", "xgl/drawing", "xgl/movie", "application/mathematica",
"application/msword", "application/octet-stream", "application/onenote",
"application/pgp-signature", "application/pkcs7-mime", "application/postscript",
"application/smil+xml", "application/vnd.acucorp", "application/vnd.clonk.c4group",
"application/vnd.eszigno3+xml", "application/vnd.fdsn.seed",
"application/vnd.framemaker", "application/vnd.geometry-explorer",
"application/vnd.grafeq", "application/vnd.ibm.modcap", "application/vnd.iccprofile",
"application/vnd.intercon.formnet", "application/vnd.kahootz",
"application/vnd.kde.kpresenter", "application/vnd.kde.kword",
"application/vnd.kinar", "application/vnd.koan", "application/vnd.ms-excel",
"application/vnd.ms-powerpoint", "application/vnd.ms-project",
"application/vnd.ms-works", "application/vnd.palm", "application/vnd.quark.quarkxpress",
"application/vnd.simtech-mindmapper", "application/vnd.solent.sdkm+xml",
"application/vnd.sus-calendar", "application/vnd.symbian.install",
"application/vnd.ufdl", "application/vnd.visio", "application/vnd.zul",
"application/x-authorware-bin", "application/x-bzip2", "application/x-debian-package",
"application/x-director", "application/x-font-ttf", "application/x-font-type1",
"application/x-mobipocket-ebook", "application/x-msdownload",
"application/x-msmediaview", "application/x-netcdf", "application/x-pkcs12",
"application/x-pkcs7-certificates", "application/x-texinfo",
"application/x-x509-ca-cert", "application/xhtml+xml", "application/xml",
"application/xv+xml", "audio/basic", "audio/midi", "audio/mpeg",
"audio/ogg", "audio/x-aiff", "audio/x-pn-realaudio", "image/jpeg",
"image/svg+xml", "image/tiff", "image/vnd.djvu", "image/x-freehand",
"image/x-pict", "message/rfc822", "model/iges", "model/mesh",
"model/vrml", "text/calendar", "text/html", "text/plain", "text/sgml",
"text/troff", "text/uri-list", "text/x-asm", "text/x-c", "text/x-fortran",
"text/x-pascal", "video/jpm", "video/mj2", "video/mp4", "video/mpeg",
"video/quicktime", "video/vnd.mpegurl", "video/x-ms-asf")), row.names = c(NA,
-1763L), class = c("tbl_df", "tbl", "data.frame"), .Names = c("extension",
"mime_type")) -> simplemagic_mime_db
|
about_panel <- '
<!-- ######### ABOUT WELL ######### -->
<div class="col-sm-3">
<div class="panel-group" id="accordion3">
<div class="panel panel-info">
<div class="panel-heading" data-toggle="collapse" data-parent="#accordion3" data-target="#collapse3">
<h4 class="panel-title accordion-toggle">About</h4>
</div>
<div id="collapse3" class="panel-collapse collapse">
<div class="panel-body">
<i>(c) 2015 by <a href="mailto:felix@nicerbead.de">Felix Schönbrodt</a> (<a href="http://www.nicebread.de">www.nicebread.de</a>). The source code of this app is licensed under the <a href="https://creativecommons.org/licenses/by/4.0/">CC-BY 4.0</a> license and will soon be published on Github.</i>
<h3>Citation</h3>
Programming this app took a considerable effort and amount of time. If you use it in your research or teaching, please consider citing the app:
<br/><br/>
Schönbrodt, F. D. (2015). <i>p-hacker: Train your p-hacking skills!</i> Retrieved from http://shinyapps.org/apps/p-hacker/.
<br/><br/>
</div>
</div>
</div>
</div>
</div>
' | /p-hacker/snippets/about.R | permissive | guhjy/p-hacker | R | false | false | 1,136 | r | about_panel <- '
<!-- ######### ABOUT WELL ######### -->
<div class="col-sm-3">
<div class="panel-group" id="accordion3">
<div class="panel panel-info">
<div class="panel-heading" data-toggle="collapse" data-parent="#accordion3" data-target="#collapse3">
<h4 class="panel-title accordion-toggle">About</h4>
</div>
<div id="collapse3" class="panel-collapse collapse">
<div class="panel-body">
<i>(c) 2015 by <a href="mailto:felix@nicerbead.de">Felix Schönbrodt</a> (<a href="http://www.nicebread.de">www.nicebread.de</a>). The source code of this app is licensed under the <a href="https://creativecommons.org/licenses/by/4.0/">CC-BY 4.0</a> license and will soon be published on Github.</i>
<h3>Citation</h3>
Programming this app took a considerable effort and amount of time. If you use it in your research or teaching, please consider citing the app:
<br/><br/>
Schönbrodt, F. D. (2015). <i>p-hacker: Train your p-hacking skills!</i> Retrieved from http://shinyapps.org/apps/p-hacker/.
<br/><br/>
</div>
</div>
</div>
</div>
</div>
' |
#!/usr/bin/env Rscript
# Analysis of StepOne qPCR fluorescence measurements: merges raw per-well
# fluorescence with the sample setup sheet, extracts the detection channel
# matching the chosen dye, and writes a tidy table plus two diagnostic plots.
# libraries
library("argparse")
library("readxl")
library("dplyr")
library("ggplot2")
# Input data from command line arguments
parser <- ArgumentParser(description='Analysis of StepOne fluorescence measurement results')
parser$add_argument("-f", "--fluorescence", required=T, help="Path to StepOne measurement results in xls format")
parser$add_argument("-s", "--samplesetup", required=T, help="Path to StepOne sample setup file in xls format")
parser$add_argument("-d", "--dye", default="FAM", choices=c("FAM", "SYBR", "JOE", "VIC", "TAMRA", "NEX", "ROX"))
parser$add_argument("-o", "--output", default="out", help="Output dir name")
args <- parser$parse_args()
raw_data_path <- args$fluorescence
samples_data_path <- args$samplesetup
dye <- args$dye
output_path <- args$output
# Import sheets as data.frames
# skip = 7: assumes the first 7 rows are StepOne export metadata -- TODO confirm
# this holds for every export version used with this script.
raw_data <- as.data.frame(read_excel(raw_data_path, col_names = T, skip = 7))
samples_data <- as.data.frame(read_excel(samples_data_path, col_names = T, skip = 7))
# Select target channel of detection based on dye specified
# (FAM/SYBR -> BLUE, JOE/VIC -> GREEN, TAMRA/NEX -> YELLOW, ROX -> RED;
# the final else can only be ROX because of the argparse `choices` restriction)
if (dye %in% c("FAM", "SYBR")){
channel <- "BLUE"
} else if (dye %in% c("JOE", "VIC")) {
channel <- "GREEN"
} else if (dye %in% c("TAMRA", "NEX")) {
channel <- "YELLOW"
} else {
channel <- "RED"
}
# Prepare input data
# merge measurement results with sample data (joins on all shared column names)
annotated_data <- merge(raw_data, samples_data)
# remove empty wells (rows with no sample name in the setup sheet)
annotated_data <- annotated_data %>% filter(!is.na(annotated_data$`Sample Name`))
# select only columns of interest
annotated_data <- subset(annotated_data, select=c("Well", "Cycle", channel, "Sample Name"))
# rename columns: the channel column becomes the generic "Value"
names(annotated_data)[names(annotated_data) == "Sample Name"] <- "Name"
names(annotated_data)[names(annotated_data) == channel] <- "Value"
# correct data types (factors for grouping/coloring in ggplot)
annotated_data$Well <- as.factor(annotated_data$Well)
annotated_data$Name <- as.factor(annotated_data$Name)
# Plot fluorescence: per-well traces over cycles ...
raw_channel_plot <- ggplot(data = annotated_data) +
geom_line(aes(x = Cycle, y = Value, color = Well)) +
theme_bw()
# ... and per-sample distributions at each cycle
samples_channel_plot <- ggplot(data = annotated_data) +
geom_boxplot(aes(x = as.factor(Cycle), y = Value, color = Name)) +
theme_bw()
# Create output files
# NOTE(review): the output directory is assumed to already exist; with the
# default "out" this will fail unless it was created beforehand.
write.table(annotated_data,
file = file.path(output_path, "fluorescence.txt"),
row.names = F, quote = F, sep = "\t")
ggsave(filename = file.path(output_path, "raw_data.png"),
plot = raw_channel_plot,
width = 250, height = 170, units = "mm")
ggsave(filename = file.path(output_path, "samples_data.png"),
plot = samples_channel_plot,
width = 250, height = 170, units = "mm")
print("Ok") | /BER_protocol.R | no_license | julie-tooi/BER-experiments-processing | R | false | false | 2,639 | r | #!/usr/bin/env Rscript
# Analysis of StepOne qPCR fluorescence measurements: merges raw per-well
# fluorescence with the sample setup sheet, extracts the detection channel
# matching the chosen dye, and writes a tidy table plus two diagnostic plots.
# libraries
library("argparse")
library("readxl")
library("dplyr")
library("ggplot2")

# Input data from command line arguments
parser <- ArgumentParser(description = 'Analysis of StepOne fluorescence measurement results')
parser$add_argument("-f", "--fluorescence", required = TRUE, help = "Path to StepOne measurement results in xls format")
parser$add_argument("-s", "--samplesetup", required = TRUE, help = "Path to StepOne sample setup file in xls format")
parser$add_argument("-d", "--dye", default = "FAM", choices = c("FAM", "SYBR", "JOE", "VIC", "TAMRA", "NEX", "ROX"))
parser$add_argument("-o", "--output", default = "out", help = "Output dir name")
args <- parser$parse_args()

raw_data_path <- args$fluorescence
samples_data_path <- args$samplesetup
dye <- args$dye
output_path <- args$output

# Select target channel of detection based on dye specified.
# A lookup table replaces the if/else chain; every value allowed by the
# argparse `choices` restriction has an entry, so the lookup cannot miss.
dye_to_channel <- c(
  FAM = "BLUE", SYBR = "BLUE",
  JOE = "GREEN", VIC = "GREEN",
  TAMRA = "YELLOW", NEX = "YELLOW",
  ROX = "RED"
)
channel <- dye_to_channel[[dye]]

# Import sheets as data.frames.
# skip = 7: assumes the first 7 rows are StepOne export metadata -- TODO
# confirm this holds for every export version used with this script.
raw_data <- as.data.frame(read_excel(raw_data_path, col_names = TRUE, skip = 7))
samples_data <- as.data.frame(read_excel(samples_data_path, col_names = TRUE, skip = 7))

# Prepare input data:
# merge measurement results with sample data (joins on all shared column names)
annotated_data <- merge(raw_data, samples_data)
# remove empty wells (rows with no sample name in the setup sheet)
annotated_data <- annotated_data %>% filter(!is.na(annotated_data$`Sample Name`))
# keep only the columns of interest; the channel column becomes generic "Value"
annotated_data <- subset(annotated_data, select = c("Well", "Cycle", channel, "Sample Name"))
names(annotated_data)[names(annotated_data) == "Sample Name"] <- "Name"
names(annotated_data)[names(annotated_data) == channel] <- "Value"
# factors for grouping/coloring in ggplot
annotated_data$Well <- as.factor(annotated_data$Well)
annotated_data$Name <- as.factor(annotated_data$Name)

# Plot fluorescence: per-well traces over cycles ...
raw_channel_plot <- ggplot(data = annotated_data) +
  geom_line(aes(x = Cycle, y = Value, color = Well)) +
  theme_bw()
# ... and per-sample distributions at each cycle
samples_channel_plot <- ggplot(data = annotated_data) +
  geom_boxplot(aes(x = as.factor(Cycle), y = Value, color = Name)) +
  theme_bw()

# Create output files.
# Fix: make sure the output directory exists -- with the default "out" the
# original script failed at write.table/ggsave unless it was pre-created.
dir.create(output_path, showWarnings = FALSE, recursive = TRUE)

write.table(annotated_data,
            file = file.path(output_path, "fluorescence.txt"),
            row.names = FALSE, quote = FALSE, sep = "\t")
ggsave(filename = file.path(output_path, "raw_data.png"),
       plot = raw_channel_plot,
       width = 250, height = 170, units = "mm")
ggsave(filename = file.path(output_path, "samples_data.png"),
       plot = samples_channel_plot,
       width = 250, height = 170, units = "mm")
print("Ok")
#!/bin/env Rscript
# Stand-alone B-allele-frequency (BAF) plotter for the SNCA gene region (chr4).
# Run as: Rscript --vanilla standalone_CNV.R $sampleID
#
# Expected inputs in the working directory (pre-generated, see notes below):
#   SNCA_gene_region_1_ukb_baf_chr4_v2_FINAL.txt  -- fam columns + per-SNP BAF, one row per sample
#   SNCA_gene_region1_ukb_bim_chr4_v2.txt         -- matching PLINK .bim subset (same SNP order)
args <- commandArgs(trailingOnly = TRUE)
# Fix: fail loudly instead of plotting nothing when the sample ID is missing.
if (length(args) < 1) {
  stop("Usage: Rscript --vanilla standalone_CNV.R <sampleID>", call. = FALSE)
}
SAMPLENAME <- args[1]
print(args[1])
print(SAMPLENAME)
######
# stand alone CNV plotter
# previously done:
# subset data # 2684 snps only (wider region)
# sed -n 21814,24186p L2R/ukb_l2r_chr4_v2.txt > SNCA_gene_region1_ukb_l2r_chr4_v2.txt
# sed -n 21814,24186p BAF/ukb_baf_chr4_v2.txt > SNCA_gene_region1_ukb_baf_chr4_v2.txt
# sed -n 21814,24186p BIM/ukb_snp_chr4_v2.bim > SNCA_gene_region1_ukb_bim_chr4_v2.txt
# R
# require("data.table")
# BAF <- fread("SNCA_gene_region1_ukb_baf_chr4_v2.txt",header=F)
# FAM <- fread("ukb33601_cal_chr4_v2_s488264.fam",header=F)
# BAF2 <- t(BAF)
# BAF3 <- cbind(FAM,BAF2)
# fwrite(BAF3, file="SNCA_gene_region_1_ukb_baf_chr4_v2_FINAL.txt", quote=FALSE,row.names=F,sep="\t")
library("data.table")  # mandatory dependency: library() errors if absent, require() only warns
BAF <- fread("SNCA_gene_region_1_ukb_baf_chr4_v2_FINAL.txt", header = FALSE)

# Keep only the requested sample's row.
newdata <- subset(BAF, V1 == SAMPLENAME)
# Fix: the original silently produced an empty plot for an unknown sample ID.
if (nrow(newdata) == 0) {
  stop("Sample not found in BAF table: ", SAMPLENAME, call. = FALSE)
}
# V1-V6 are the PLINK .fam columns prepended during preprocessing; drop them
# so only the per-SNP BAF values remain.
fam_cols <- paste0("V", 1:6)
newdata[, (fam_cols) := NULL]
BAF <- t(newdata)  # one column of BAF values, in file (SNP) order

# SNP coordinates for the same region; row order matches the BAF file.
BIM <- fread("SNCA_gene_region1_ukb_bim_chr4_v2.txt", header = FALSE)
PLOT <- cbind(BAF, BIM)
names(PLOT) <- c("BAF", "CHR", "RS", "CM", "BP", "A1", "A2")  # CM: unused .bim position column

# L2R and BAF plots: three zoom levels (wide region, +/-~0.5 Mb, gene only).
# The hatched rectangle marks the SNCA gene; horizontal lines at 1/3 and 2/3
# flag BAF bands suggestive of a copy-number change.
options(scipen = 20)  # keep basepair axis labels out of scientific notation
pdf(paste0(SAMPLENAME, "_SNCA_REGIONAL_BAF_PLOT.pdf"), height = 4, width = 20)
plot(PLOT$BP, PLOT$BAF, pch = 20, ylab = "B allele frequency", xlab = "CHR 4 basepair", xlim = c(85645250, 95759466))
rect(xleft = 90645250, xright = 90759447, ybottom = par("usr")[3], ytop = par("usr")[4], density = 10, col = "blue")
abline(h = 0.66, col = "blue")
abline(h = 0.33, col = "blue")
plot(PLOT$BP, PLOT$BAF, pch = 20, ylab = "B allele frequency", xlab = "CHR 4 basepair", xlim = c(90145250, 91259466))
rect(xleft = 90645250, xright = 90759447, ybottom = par("usr")[3], ytop = par("usr")[4], density = 10, col = "blue")
abline(h = 0.66, col = "blue")
abline(h = 0.33, col = "blue")
plot(PLOT$BP, PLOT$BAF, pch = 20, ylab = "B allele frequency", xlab = "CHR 4 basepair", xlim = c(90645250, 90759466))
abline(h = 0.66, col = "blue")
abline(h = 0.33, col = "blue")
dev.off()
###
# DONE
| /standalone_CNV_BAF.R | no_license | JujiaoKang/UKbiobank_SNCA | R | false | false | 2,089 | r | #!/bin/env Rscript
args = commandArgs(trailingOnly=TRUE)
# run like this:
# Rscript --vanilla standalone_CNV.R $sampleID
SAMPLENAME = args[1]
print(args[1])
print(SAMPLENAME)
######
# stand alone CNV plotter
# previously done:
# subset data # 2684 snps only (wider region)
# sed -n 21814,24186p L2R/ukb_l2r_chr4_v2.txt > SNCA_gene_region1_ukb_l2r_chr4_v2.txt
# sed -n 21814,24186p BAF/ukb_baf_chr4_v2.txt > SNCA_gene_region1_ukb_baf_chr4_v2.txt
# sed -n 21814,24186p BIM/ukb_snp_chr4_v2.bim > SNCA_gene_region1_ukb_bim_chr4_v2.txt
# R
# require("data.table")
# BAF <- fread("SNCA_gene_region1_ukb_baf_chr4_v2.txt",header=F)
# FAM <- fread("ukb33601_cal_chr4_v2_s488264.fam",header=F)
# BAF2 <- t(BAF)
# BAF3 <- cbind(FAM,BAF2)
# fwrite(BAF3, file="SNCA_gene_region_1_ukb_baf_chr4_v2_FINAL.txt", quote=FALSE,row.names=F,sep="\t")
require("data.table")
BAF <- fread("SNCA_gene_region_1_ukb_baf_chr4_v2_FINAL.txt",header=F)
newdata <- subset(BAF, V1 == SAMPLENAME)
# V1 - V6 is crap
newdata$V1 <- NULL
newdata$V2 <- NULL
newdata$V3 <- NULL
newdata$V4 <- NULL
newdata$V5 <- NULL
newdata$V6 <- NULL
BAF <- t(newdata)
BIM <- fread("SNCA_gene_region1_ukb_bim_chr4_v2.txt",header=F)
PLOT <- cbind(BAF,BIM)
names(PLOT) <- c("BAF","CHR","RS","CRAP","BP","A1","A2")
# L2R and BAF plots
options(scipen=20)
pdf(paste(SAMPLENAME,"_SNCA_REGIONAL_BAF_PLOT.pdf",sep=""),height=4, width=20)
plot(PLOT$BP,PLOT$BAF,pch=20,ylab="B allele frequency",xlab="CHR 4 basepair",xlim=c(85645250,95759466))
rect(xleft=90645250,xright = 90759447,ybottom=par("usr")[3], ytop=par("usr")[4], density=10, col = "blue")
abline(h=0.66, col="blue")
abline(h=0.33, col="blue")
plot(PLOT$BP,PLOT$BAF,pch=20,ylab="B allele frequency",xlab="CHR 4 basepair",xlim=c(90145250,91259466))
rect(xleft=90645250,xright = 90759447,ybottom=par("usr")[3], ytop=par("usr")[4], density=10, col = "blue")
abline(h=0.66, col="blue")
abline(h=0.33, col="blue")
plot(PLOT$BP,PLOT$BAF,pch=20,ylab="B allele frequency",xlab="CHR 4 basepair",xlim=c(90645250,90759466))
abline(h=0.66, col="blue")
abline(h=0.33, col="blue")
dev.off()
###
# DONE
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/energy_balance.r
\name{biochemical.energy}
\alias{biochemical.energy}
\title{Biochemical Energy}
\usage{
biochemical.energy(NEE, alpha = 0.422)
}
\arguments{
\item{NEE}{Net ecosystem exchange (umol CO2 m-2 s-1)}
\item{alpha}{Energy taken up/released by photosynthesis/respiration per mol CO2 fixed/respired (J umol-1)}
}
\value{
\item{Sp -}{biochemical energy (W m-2)}
}
\description{
Radiant energy absorbed in photosynthesis or heat release by respiration calculated
from net ecosystem exchange of CO2 (NEE).
}
\details{
The following sign convention is employed: NEE is negative when carbon is taken up by the ecosystem.
Positive values of the resulting biochemical energy mean that energy (heat) is taken up by the ecosystem,
negative ones that heat is released.
The value of alpha is taken from Nobel 1974 (see Meyers & Hollinger 2004), but other values
have been used (e.g. Blanken et al., 1997)
}
\examples{
# Calculate biochemical energy taken up by the ecosystem with
# a measured NEE of -30umol CO2 m-2 s-1
biochemical.energy(NEE=-30)
}
\references{
Meyers, T.P., Hollinger, S.E. 2004: An assessment of storage terms in the surface energy
balance of maize and soybean. Agricultural and Forest Meteorology 125, 105-115.
Nobel, P.S., 1974: Introduction to Biophysical Plant Physiology.
Freeman, New York.
Blanken, P.D. et al., 1997: Energy balance and canopy conductance of a boreal aspen
forest: Partitioning overstory and understory components.
Journal of Geophysical Research 102, 28915-28927.
}
| /man/biochemical.energy.Rd | no_license | cran/bigleaf | R | false | true | 1,773 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/energy_balance.r
\name{biochemical.energy}
\alias{biochemical.energy}
\title{Biochemical Energy}
\usage{
biochemical.energy(NEE, alpha = 0.422)
}
\arguments{
\item{NEE}{Net ecosystem exchange (umol CO2 m-2 s-1)}
\item{alpha}{Energy taken up/released by photosynthesis/respiration per mol CO2 fixed/respired (J umol-1)}
}
\value{
\item{Sp -}{biochemical energy (W m-2)}
}
\description{
Radiant energy absorbed in photosynthesis or heat release by respiration calculated
from net ecosystem exchange of CO2 (NEE).
}
\details{
The following sign convention is employed: NEE is negative when carbon is taken up by the ecosystem.
Positive values of the resulting biochemical energy mean that energy (heat) is taken up by the ecosystem,
negative ones that heat is released.
The value of alpha is taken from Nobel 1974 (see Meyers & Hollinger 2004), but other values
have been used (e.g. Blanken et al., 1997)
}
\examples{
# Calculate biochemical energy taken up by the ecosystem with
# a measured NEE of -30umol CO2 m-2 s-1
biochemical.energy(NEE=-30)
}
\references{
Meyers, T.P., Hollinger, S.E. 2004: An assessment of storage terms in the surface energy
balance of maize and soybean. Agricultural and Forest Meteorology 125, 105-115.
Nobel, P.S., 1974: Introduction to Biophysical Plant Physiology.
Freeman, New York.
Blanken, P.D. et al., 1997: Energy balance and canopy conductance of a boreal aspen
forest: Partitioning overstory and understory components.
Journal of Geophysical Research 102, 28915-28927.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stat-summaryh.R
\name{stat_summaryh}
\alias{stat_summaryh}
\title{Horizontal summary.}
\usage{
stat_summaryh(mapping = NULL, data = NULL, geom = "pointrangeh",
position = "identity", ..., fun.data = NULL, fun.x = NULL,
fun.xmax = NULL, fun.xmin = NULL, fun.args = list(),
na.rm = FALSE, show.legend = NA, inherit.aes = TRUE)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link[=aes]{aes()}} or
\code{\link[=aes_]{aes_()}}. If specified and \code{inherit.aes = TRUE} (the
default), it is combined with the default mapping at the top level of the
plot. You must supply \code{mapping} if there is no plot mapping.}
\item{data}{The data to be displayed in this layer. There are three
options:
If \code{NULL}, the default, the data is inherited from the plot
data as specified in the call to \code{\link[=ggplot]{ggplot()}}.
A \code{data.frame}, or other object, will override the plot
data. All objects will be fortified to produce a data frame. See
\code{\link[=fortify]{fortify()}} for which variables will be created.
A \code{function} will be called with a single argument,
the plot data. The return value must be a \code{data.frame}, and
will be used as the layer data.}
\item{geom}{Use to override the default connection between
\code{geom_histogram()}/\code{geom_freqpoly()} and \code{stat_bin()}.}
\item{position}{Position adjustment, either as a string, or the result of
a call to a position adjustment function.}
\item{...}{Other arguments passed on to \code{\link[=layer]{layer()}}. These are
often aesthetics, used to set an aesthetic to a fixed value, like
\code{colour = "red"} or \code{size = 3}. They may also be parameters
to the paired geom/stat.}
\item{fun.data}{A function that is given the complete data and should
return a data frame with variables \code{xmin}, \code{x}, and \code{xmax}.}
\item{fun.xmin, fun.x, fun.xmax}{Alternatively, supply three individual
functions that are each passed a vector of x's and should return a
single number.}
\item{fun.args}{Optional additional arguments passed on to the functions.}
\item{na.rm}{If \code{FALSE}, the default, missing values are removed with
a warning. If \code{TRUE}, missing values are silently removed.}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.
It can also be a named logical vector to finely select the aesthetics to
display.}
\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics,
rather than combining with them. This is most useful for helper functions
that define both data and aesthetics and shouldn't inherit behaviour from
the default plot specification, e.g. \code{\link[=borders]{borders()}}.}
}
\description{
Horizontal version of \code{\link[ggplot2]{stat_summary}}().
}
| /man/stat_summaryh.Rd | no_license | mjskay/ggstance | R | false | true | 2,953 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stat-summaryh.R
\name{stat_summaryh}
\alias{stat_summaryh}
\title{Horizontal summary.}
\usage{
stat_summaryh(mapping = NULL, data = NULL, geom = "pointrangeh",
position = "identity", ..., fun.data = NULL, fun.x = NULL,
fun.xmax = NULL, fun.xmin = NULL, fun.args = list(),
na.rm = FALSE, show.legend = NA, inherit.aes = TRUE)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link[=aes]{aes()}} or
\code{\link[=aes_]{aes_()}}. If specified and \code{inherit.aes = TRUE} (the
default), it is combined with the default mapping at the top level of the
plot. You must supply \code{mapping} if there is no plot mapping.}
\item{data}{The data to be displayed in this layer. There are three
options:
If \code{NULL}, the default, the data is inherited from the plot
data as specified in the call to \code{\link[=ggplot]{ggplot()}}.
A \code{data.frame}, or other object, will override the plot
data. All objects will be fortified to produce a data frame. See
\code{\link[=fortify]{fortify()}} for which variables will be created.
A \code{function} will be called with a single argument,
the plot data. The return value must be a \code{data.frame}, and
will be used as the layer data.}
\item{geom}{Use to override the default connection between
\code{geom_histogram()}/\code{geom_freqpoly()} and \code{stat_bin()}.}
\item{position}{Position adjustment, either as a string, or the result of
a call to a position adjustment function.}
\item{...}{Other arguments passed on to \code{\link[=layer]{layer()}}. These are
often aesthetics, used to set an aesthetic to a fixed value, like
\code{colour = "red"} or \code{size = 3}. They may also be parameters
to the paired geom/stat.}
\item{fun.data}{A function that is given the complete data and should
return a data frame with variables \code{xmin}, \code{x}, and \code{xmax}.}
\item{fun.xmin, fun.x, fun.xmax}{Alternatively, supply three individual
functions that are each passed a vector of x's and should return a
single number.}
\item{fun.args}{Optional additional arguments passed on to the functions.}
\item{na.rm}{If \code{FALSE}, the default, missing values are removed with
a warning. If \code{TRUE}, missing values are silently removed.}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.
It can also be a named logical vector to finely select the aesthetics to
display.}
\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics,
rather than combining with them. This is most useful for helper functions
that define both data and aesthetics and shouldn't inherit behaviour from
the default plot specification, e.g. \code{\link[=borders]{borders()}}.}
}
\description{
Horizontal version of \code{\link[ggplot2]{stat_summary}}().
}
|
\name{.JavaTerminate}
\alias{.JavaTerminate}
\title{Terminates the Java Virtual Machine}
\description{
Unloads the Java Virtual Machine, releasing its
resources and terminating the Omegahat session.
\textit{Once the JVM is terminated, it cannot
be restarted within this R session.}
}
\usage{
.JavaTerminate()
}
\details{
This just calls the internal routine which
notifies the JVM that it should terminate.
Exactly how this action is performed depends
on the current state of the JVM and the threads that
are active.
}
\value{
\code{T} indicating that the JVM is
terminated and should not be used.
}
\references{\url{http://www.omegahat.org/RSJava}}
\author{Duncan Temple Lang}
\seealso{
\code{\link{.JavaInit}}
}
\examples{
\dontrun{
# active the JVM only to find out what
# version of Java it supports.
# No further activity can take place in the
# Java session.
#
.JavaInit()
jversion <- .Java("System", "getProperty", "java.version")
.JavaTerminate()
}
}
\keyword{Java}
| /man/JavaTerminate.Rd | no_license | cran/Java | R | false | false | 1,000 | rd | \name{.JavaTerminate}
\alias{.JavaTerminate}
\title{Terminates the Java Virtual Machine}
\description{
Unloads the Java Virtual Machine, releasing its
resources and terminating the Omegahat session.
\textit{Once the JVM is terminated, it cannot
be restarted within this R session.}
}
\usage{
.JavaTerminate()
}
\details{
This just calls the internal routine which
notifies the JVM that it should terminate.
Exactly how this action is performed depends
on the current state of the JVM and the threads that
are active.
}
\value{
\code{T} indicating that the JVM is
terminated and should not be used.
}
\references{\url{http://www.omegahat.org/RSJava}}
\author{Duncan Temple Lang}
\seealso{
\code{\link{.JavaInit}}
}
\examples{
\dontrun{
# active the JVM only to find out what
# version of Java it supports.
# No further activity can take place in the
# Java session.
#
.JavaInit()
jversion <- .Java("System", "getProperty", "java.version")
.JavaTerminate()
}
}
\keyword{Java}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stat_cor.R
\name{stat_cor}
\alias{stat_cor}
\title{Add Correlation Coefficients with P-values to a Scatter Plot}
\usage{
stat_cor(mapping = NULL, data = NULL, method = "pearson",
label.sep = ", ", label.x.npc = "left", label.y.npc = "top",
label.x = NULL, label.y = NULL, geom = "text", position = "identity",
na.rm = FALSE, show.legend = NA, inherit.aes = TRUE, ...)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link{aes}} or
\code{\link{aes_}}. If specified and \code{inherit.aes = TRUE} (the
default), it is combined with the default mapping at the top level of the
plot. You must supply \code{mapping} if there is no plot mapping.}
\item{data}{The data to be displayed in this layer. There are three
options:
If \code{NULL}, the default, the data is inherited from the plot
data as specified in the call to \code{\link{ggplot}}.
A \code{data.frame}, or other object, will override the plot
data. All objects will be fortified to produce a data frame. See
\code{\link{fortify}} for which variables will be created.
A \code{function} will be called with a single argument,
the plot data. The return value must be a \code{data.frame}, and
will be used as the layer data.}
\item{method}{a character string indicating which correlation coefficient (or
covariance) is to be computed. One of "pearson" (default), "kendall", or
"spearman".}
\item{label.sep}{a character string to separate the terms. Default is ", ", to
separate the correlation coefficient and the p.value.}
\item{label.x.npc, label.y.npc}{can be \code{numeric} or \code{character}
vector of the same length as the number of groups and/or panels. If too
short they will be recycled. \itemize{ \item If \code{numeric}, value
should be between 0 and 1. Coordinates to be used for positioning the
label, expressed in "normalized parent coordinates". \item If
\code{character}, allowed values include: i) one of c('right', 'left',
'center', 'centre', 'middle') for x-axis; ii) and one of c( 'bottom',
'top', 'center', 'centre', 'middle') for y-axis.}
If too short they will be recycled.}
\item{label.x, label.y}{\code{numeric} Coordinates (in data units) to be used
for absolute positioning of the label. If too short they will be recycled.}
\item{geom}{The geometric object to use display the data}
\item{position}{Position adjustment, either as a string, or the result of
a call to a position adjustment function.}
\item{na.rm}{If FALSE (the default), removes missing values with a warning.
If TRUE silently removes missing values.}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.}
\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics,
rather than combining with them. This is most useful for helper functions
that define both data and aesthetics and shouldn't inherit behaviour from
the default plot specification, e.g. \code{\link{borders}}.}
\item{...}{other arguments to pass to \code{\link[ggplot2]{geom_text}} or
\code{\link[ggplot2]{geom_label}}.}
}
\description{
Add correlation coefficients with p-values to a scatter plot.
}
\examples{
# Load data
data("mtcars")
df <- mtcars
df$cyl <- as.factor(df$cyl)
# Scatter plot with correlation coefficient
#:::::::::::::::::::::::::::::::::::::::::::::::::
sp <- ggscatter(df, x = "wt", y = "mpg",
add = "reg.line",  # Add regression line
add.params = list(color = "blue", fill = "lightgray"), # Customize reg. line
conf.int = TRUE # Add confidence interval
)
# Add correlation coefficient
sp + stat_cor(method = "pearson", label.x = 3, label.y = 30)
# Color by groups and facet
#::::::::::::::::::::::::::::::::::::::::::::::::::::
sp <- ggscatter(df, x = "wt", y = "mpg",
color = "cyl", palette = "jco",
add = "reg.line", conf.int = TRUE)
sp + stat_cor(aes(color = cyl), label.x = 3)
}
\seealso{
\code{\link{ggscatter}}
}
| /man/stat_cor.Rd | no_license | smangul1/ggpubr | R | false | true | 4,095 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stat_cor.R
\name{stat_cor}
\alias{stat_cor}
\title{Add Correlation Coefficients with P-values to a Scatter Plot}
\usage{
stat_cor(mapping = NULL, data = NULL, method = "pearson",
label.sep = ", ", label.x.npc = "left", label.y.npc = "top",
label.x = NULL, label.y = NULL, geom = "text", position = "identity",
na.rm = FALSE, show.legend = NA, inherit.aes = TRUE, ...)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link{aes}} or
\code{\link{aes_}}. If specified and \code{inherit.aes = TRUE} (the
default), it is combined with the default mapping at the top level of the
plot. You must supply \code{mapping} if there is no plot mapping.}
\item{data}{The data to be displayed in this layer. There are three
options:
If \code{NULL}, the default, the data is inherited from the plot
data as specified in the call to \code{\link{ggplot}}.
A \code{data.frame}, or other object, will override the plot
data. All objects will be fortified to produce a data frame. See
\code{\link{fortify}} for which variables will be created.
A \code{function} will be called with a single argument,
the plot data. The return value must be a \code{data.frame}, and
will be used as the layer data.}
\item{method}{a character string indicating which correlation coefficient (or
covariance) is to be computed. One of "pearson" (default), "kendall", or
"spearman".}
\item{label.sep}{a character string to separate the terms. Default is ", ", to
separate the correlation coefficient and the p.value.}
\item{label.x.npc, label.y.npc}{can be \code{numeric} or \code{character}
vector of the same length as the number of groups and/or panels. If too
short they will be recycled. \itemize{ \item If \code{numeric}, value
should be between 0 and 1. Coordinates to be used for positioning the
label, expressed in "normalized parent coordinates". \item If
\code{character}, allowed values include: i) one of c('right', 'left',
'center', 'centre', 'middle') for x-axis; ii) and one of c( 'bottom',
'top', 'center', 'centre', 'middle') for y-axis.}
If too short they will be recycled.}
\item{label.x, label.y}{\code{numeric} Coordinates (in data units) to be used
for absolute positioning of the label. If too short they will be recycled.}
\item{geom}{The geometric object to use display the data}
\item{position}{Position adjustment, either as a string, or the result of
a call to a position adjustment function.}
\item{na.rm}{If FALSE (the default), removes missing values with a warning.
If TRUE silently removes missing values.}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.}
\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics,
rather than combining with them. This is most useful for helper functions
that define both data and aesthetics and shouldn't inherit behaviour from
the default plot specification, e.g. \code{\link{borders}}.}
\item{...}{other arguments to pass to \code{\link[ggplot2]{geom_text}} or
\code{\link[ggplot2]{geom_label}}.}
}
\description{
Add correlation coefficients with p-values to a scatter plot.
}
\examples{
# Load data
data("mtcars")
df <- mtcars
df$cyl <- as.factor(df$cyl)
# Scatter plot with correlation coefficient
#:::::::::::::::::::::::::::::::::::::::::::::::::
sp <- ggscatter(df, x = "wt", y = "mpg",
add = "reg.line", # Add regressin line
add.params = list(color = "blue", fill = "lightgray"), # Customize reg. line
conf.int = TRUE # Add confidence interval
)
# Add correlation coefficient
sp + stat_cor(method = "pearson", label.x = 3, label.y = 30)
# Color by groups and facet
#::::::::::::::::::::::::::::::::::::::::::::::::::::
sp <- ggscatter(df, x = "wt", y = "mpg",
color = "cyl", palette = "jco",
add = "reg.line", conf.int = TRUE)
sp + stat_cor(aes(color = cyl), label.x = 3)
}
\seealso{
\code{\link{ggscatter}}
}
|
#' moReno - an R package for Jacob Moreno's sociometry technique
#'
#' Provides helpers for calculating sociometric indexes (both group and individual) and sociogram plot. Allows data import from CSV.
#' @docType package
#' @name moreno-package
NULL
| /R/moreno.R | no_license | aL3xa/moReno | R | false | false | 250 | r | #' moReno - an R package for Jacob Moreno's sociometry technique
#'
#' Provides helpers for calculating sociometric indexes (both group and individual) and sociogram plot. Allows data import from CSV.
#' @docType package
#' @name moreno-package
NULL
|
#' Class to Store Sealevel Data
#'
#' This class stores sealevel data, e.g. from a tide gauge.
#'
#' @templateVar class sealevel
#'
#' @templateVar dataExample The key items stored in this slot are `time` and `elevation`.
#'
#' @templateVar metadataExample An example of the former might be the location at which a `sealevel` measurement was made, stored in `longitude` and `latitude`, and of the latter might be `filename`, the name of the data source.
#'
#' @template slot_summary
#'
#' @template slot_put
#'
#' @template slot_get
#'
#' @author Dan Kelley
#'
#' @family classes provided by oce
#' @family things related to sealevel data
setClass("sealevel", contains="oce")
#' @title Sealevel data for Halifax Harbour
#'
#' @description
#' This sample sea-level dataset is the 2003 record from Halifax Harbour in
#' Nova Scotia, Canada. For reasons that are not mentioned on the data archive
#' website, the record ends on the 8th of October.
#'
#' @name sealevel
#'
#' @docType data
#'
#' @author Dan Kelley
#'
#' @source The data were created as \preformatted{ sealevel <-
#' read.oce("490-01-JAN-2003_slev.csv") sealevel <- oce.edit(sealevel,
#' "longitude", -sealevel[["longitude"]], reason="Fix longitude hemisphere") }
#' where the csv file was downloaded from reference 1. Note the correction of longitude
#' sign, which is required because the data file has no indication that this is
#' the western hemisphere.
#'
#' @references
#' 1. Fisheries and Oceans Canada \url{http://www.meds-sdmm.dfo-mpo.gc.ca/isdm-gdsi/index-eng.html}
#'
#' @family datasets provided with oce
#' @family things related to sealevel data
NULL
#' Sea-level data set acquired in 1975 at Tuktoyaktuk
#'
#' This sea-level dataset is provided with in Appendix 7.2 of Foreman (1977)
#' and also with the `T_TIDE` package (Pawlowicz et al., 2002). It results
#' from measurements made in 1975 at Tuktoyaktuk, Northwest Territories,
#' Canada.
#'
#' The data set contains 1584 points, some of which have NA for sea-level
#' height.
#'
#' Although Foreman's Appendix 7.2 states that times are in Mountain standard
#' time, the timezone is set to `UTC` in the present case, so that the
#' results will be similar to those he provides in his Appendix 7.3.
#'
#' @name sealevelTuktoyaktuk
#'
#' @docType data
#'
#' @references Foreman, M. G. G., 1977. Manual for tidal heights analysis and
#' prediction. Pacific Marine Science Report 77-10, Institute of Ocean
#' Sciences, Patricia Bay, Sidney, BC, 58pp.
#'
#' Pawlowicz, Rich, Bob Beardsley, and Steve Lentz, 2002. Classical tidal
#' harmonic analysis including error estimates in MATLAB using `T_TIDE`.
#' Computers and Geosciences, 28, 929-937.
#'
#' @source The data were based on the `T_TIDE` dataset, which in turn
#' seems to be based on Appendix 7.2 of Foreman (1977). Minor editing was on
#' file format, and then the `sealevelTuktoyaktuk` object was created
#' using [as.sealevel()].
#'
#' @examples
#'\donttest{
#' library(oce)
#' data(sealevelTuktoyaktuk)
#' time <- sealevelTuktoyaktuk[["time"]]
#' elevation <- sealevelTuktoyaktuk[["elevation"]]
#' oce.plot.ts(time, elevation, type='l', ylab="Height [m]", ylim=c(-2, 6))
#' legend("topleft", legend=c("Tuktoyaktuk (1975)","Detided"),
#' col=c("black","red"),lwd=1)
#' tide <- tidem(sealevelTuktoyaktuk)
#' detided <- elevation - predict(tide)
#' lines(time, detided, col="red")
#'}
#'
#' @section Historical note:
#' Until Jan 6, 2018, the time in this dataset had been increased
#' by 7 hours. However, this alteration was removed on this date,
#' to make for simpler comparison of amplitude and phase output with
#' the results obtained by Foreman (1977) and Pawlowicz et al. (2002).
#'
#' @family datasets provided with oce
#' @family things related to sealevel data
NULL
# Initializer for "sealevel" objects: delegates construction to the parent
# ("oce") initializer, stores the optional elevation and time vectors in the
# data slot, and records the creation event in the processing log.
setMethod(f = "initialize",
    signature = "sealevel",
    definition = function(.Object, elevation, time, ...) {
        .Object <- callNextMethod(.Object, ...)
        # Only store what the caller actually supplied.
        if (!missing(elevation)) {
            .Object@data$elevation <- elevation
        }
        if (!missing(time)) {
            .Object@data$time <- time
        }
        .Object@processingLog$time <- presentTime()
        .Object@processingLog$value <- "create 'sealevel' object"
        .Object
    })
#' @title Summarize a Sealevel Object
#'
#' @description
#' Summarizes some of the data in a sealevel object.
#'
#' @param object A [sealevel-class] object.
#'
#' @param \dots further arguments passed to or from other methods.
#'
#' @return A matrix containing statistics of the elements of the `data`
#' slot.
#'
#' @author Dan Kelley
#'
#' @examples
#' library(oce)
#' data(sealevel)
#' summary(sealevel)
#'
#' @family things related to sealevel data
# S4 summary method for "sealevel": prints station metadata (when present),
# the measurement location, and observation counts, then delegates to the
# inherited oce summary method for statistics of the data slot.
setMethod(f="summary",
signature="sealevel",
definition=function(object, ...) {
cat("Sealevel Summary\n----------------\n\n")
# Each showMetadataItem() call prints a line only if that metadata item exists.
showMetadataItem(object, "stationNumber", "number: ")
showMetadataItem(object, "version", "version: ")
showMetadataItem(object, "stationName", "name: ")
showMetadataItem(object, "region", "region: ")
showMetadataItem(object, "deltat", "sampling delta-t: ")
cat("* Location: ", latlonFormat(object@metadata$latitude,
object@metadata$longitude,
digits=5), "\n")
showMetadataItem(object, "year", "year: ")
# Report total and non-missing elevation counts separately, since sealevel
# records commonly contain NA gaps.
ndata <- length(object@data$elevation)
cat("* number of observations: ", ndata, "\n")
cat("* \" non-missing: ", sum(!is.na(object@data$elevation)), "\n")
invisible(callNextMethod()) # summary
})
#' @title Subset a Sealevel Object
#'
#' @description
#' This function is somewhat analogous to [subset.data.frame()], but
#' subsetting is only permitted by time.
#'
#' @param x a [sealevel-class] object.
#'
#' @param subset a condition to be applied to the `data` portion of
#' `x`.
#'
#' @param \dots ignored.
#'
#' @return A new `sealevel` object.
#'
#' @author Dan Kelley
#'
#' @examples
#' library(oce)
#' data(sealevel)
#' plot(sealevel)
#' plot(subset(sealevel, time < mean(range(sealevel[['time']]))))
#'
#' @family things related to sealevel data
#' @family functions that subset oce objects
# S4 subset method for "sealevel": keeps only the observations for which
# the supplied condition is TRUE (NA results are treated as "drop"), copying
# metadata unchanged and appending a record to the processing log.
setMethod(f = "subset",
    signature = "sealevel",
    definition = function(x, subset, ...) {
        res <- new("sealevel")
        res@metadata <- x@metadata
        res@processingLog <- x@processingLog
        # Evaluate the subset expression once, in the context of the data
        # slot (falling back to the caller's frame for other symbols).
        # The original re-evaluated it inside the loop, once per data
        # element, even though the result cannot differ.
        keep <- eval(substitute(subset), x@data, parent.frame(2))
        keep <- keep & !is.na(keep)
        for (i in seq_along(x@data)) {
            res@data[[i]] <- x@data[[i]][keep]
        }
        names(res@data) <- names(x@data)
        subsetString <- paste(deparse(substitute(subset)), collapse = " ")
        res@processingLog <- processingLogAppend(res@processingLog,
            paste("subset.sealevel(x, subset=", subsetString, ")", sep = ""))
        res
    })
#' @title Extract Something From a Sealevel Object
#'
#' @param x a [sealevel-class] object.
#'
#' @template sub_subTemplate
#'
#' @family things related to sealevel data
# Extraction operator for "sealevel" objects. All lookups are delegated to
# the inherited oce "[[" method via callNextMethod().
setMethod(f="[[",
signature(x="sealevel", i="ANY", j="ANY"),
definition=function(x, i, j, ...) {
callNextMethod()         # [[
})
#' @title Replace Parts of a Sealevel Object
#'
#' @param x a [sealevel-class] object.
#'
#' @template sub_subsetTemplate
#'
#' @family things related to sealevel data
# Replacement operator for "sealevel" objects. All assignments are delegated
# to the inherited oce "[[<-" method via callNextMethod().
setMethod(f="[[<-",
signature(x="sealevel", i="ANY", j="ANY"),
definition=function(x, i, j, ..., value) {
callNextMethod(x=x, i=i, j=j, ...=..., value=value) # [[<-
})
# Validity check for "sealevel": every element of the data slot (e.g. time
# and elevation) must have the same length.
#
# The original implementation used var(lengths) != 0, which fails when the
# data slot holds exactly one element (var() of a single value is NA, so
# if (NA) errors), and used 1:ndata, which breaks for an empty data slot.
# It also cat()-ed the problem and returned FALSE; returning the message
# string lets the S4 validity machinery report it properly.
setValidity("sealevel",
    function(object) {
        lengths <- vapply(object@data, length, integer(1))
        # length(unique(...)) is safe for 0 or 1 data elements.
        if (length(unique(lengths)) > 1) {
            "lengths of data elements are unequal"
        } else {
            TRUE
        }
    })
#' Coerce Data Into a Sealevel Object
#'
#' Coerces a dataset (minimally, a sequence of times and heights) into a
#' sealevel dataset.
#' The arguments are based on the standard data format, as were described in a
#' file formerly available at reference 1.
#'
#' @param elevation a list of sea-level heights in metres, in an hourly
#' sequence.
#'
#' @param time optional list of times, in POSIXct format. If missing, the list
#' will be constructed assuming hourly samples, starting at 0000-01-01
#' 00:00:00.
#'
#' @param header a character string as read from first line of a standard data
#' file.
#'
#' @param stationNumber three-character string giving station number.
#'
#' @param stationVersion single character for version of station.
#'
#' @param stationName the name of station (at most 18 characters).
#'
#' @param region the name of the region or country of station (at most 19
#' characters).
#'
#' @param year the year of observation.
#'
#' @param longitude the longitude in decimal degrees, positive east of
#' Greenwich.
#'
#' @param latitude the latitude in decimal degrees, positive north of the
#' equator.
#'
#' @param GMTOffset offset from GMT, in hours.
#'
#' @param decimationMethod a coded value, with 1 meaning filtered, 2 meaning a
#' simple average of all samples, 3 meaning spot readings, and 4 meaning some
#' other method.
#'
#' @param referenceOffset ?
#'
#' @param referenceCode ?
#'
#' @param deltat optional interval between samples, in hours (as for the
#' [ts()] timeseries function). If this is not provided, and `t`
#' can be understood as a time, then the difference between the first two times
#' is used. If this is not provided, and `t` cannot be understood as a
#' time, then 1 hour is assumed.
#'
#' @return A [sealevel-class] object (for details, see [read.sealevel()]).
#'
#' @author Dan Kelley
#'
#' @seealso The documentation for the [sealevel-class] class explains the
#' structure of sealevel objects, and also outlines the other functions dealing
#' with them.
#'
#' @references `http://ilikai.soest.hawaii.edu/rqds/hourly.fmt` (this link
#' worked for years but failed at least temporarily on December 4, 2016).
#'
#' @examples
#' library(oce)
#'
#' # Construct a year of M2 tide, starting at the default time
#' # 0000-01-01T00:00:00.
#' h <- seq(0, 24*365)
#' elevation <- 2.0 * sin(2*pi*h/12.4172)
#' sl <- as.sealevel(elevation)
#' summary(sl)
#'
#' # As above, but start at the Y2K time.
#' time <- as.POSIXct("2000-01-01") + h * 3600
#' sl <- as.sealevel(elevation, time)
#' summary(sl)
#' @family things related to sealevel data
as.sealevel <- function(elevation,
                        time,
                        header=NULL,
                        stationNumber=NA,
                        stationVersion=NA,
                        stationName=NULL,
                        region=NULL,
                        year=NA,
                        longitude=NA, latitude=NA,
                        GMTOffset=NA,
                        decimationMethod=NA,
                        referenceOffset=NA,
                        referenceCode=NA,
                        deltat)
{
    # Build a sealevel object from an elevation series (metres) and,
    # optionally, a time vector; see the roxygen block above for the
    # meaning of the station-related arguments, which are stored verbatim
    # in the metadata slot.
    if (missing(elevation))
        stop("must supply sealevel height, elevation, in metres")
    if (inherits(elevation, "POSIXt"))
        stop("elevation must be a numeric vector, not a time vector")
    res <- new('sealevel')
    n <- length(elevation)
    if (missing(time)) {
        ## construct hourly times from time "zero" (0000-01-01 00:00:00 UTC)
        start <- as.POSIXct("0000-01-01 00:00:00", tz="UTC")
        time <- as.POSIXct(start + seq(0, n - 1, 1) * 3600, tz="UTC")
        if (is.na(GMTOffset))
            GMTOffset <- 0 # FIXME: do I want to do this?
    } else {
        time <- as.POSIXct(time, tz="UTC")
    }
    # Infer the sampling interval (hours) from the first two times when it
    # is not given; fall back to hourly if that inference fails.
    if (missing(deltat))
        deltat <- as.numeric(difftime(time[2], time[1], units="hours"))
    if (is.na(deltat) || deltat <= 0)
        deltat <- 1
    res@metadata$filename <- ""
    res@metadata$header <- header
    res@metadata$year <- year
    res@metadata$stationNumber <- stationNumber
    res@metadata$stationVersion <- stationVersion
    res@metadata$stationName <- stationName
    res@metadata$region <- region
    res@metadata$latitude <- latitude
    res@metadata$longitude <- longitude
    res@metadata$GMTOffset <- GMTOffset
    res@metadata$decimationMethod <- decimationMethod
    res@metadata$referenceOffset <- referenceOffset
    res@metadata$referenceCode <- referenceCode
    res@metadata$units <- list(elevation=list(unit=expression(m), scale=""))
    # BUG FIX: this was `length(t)`, i.e. the length of the base-R
    # transpose *function* (always 1); the intent is the sample count.
    res@metadata$n <- n
    res@metadata$deltat <- deltat
    res@data$elevation <- elevation
    res@data$time <- time
    res@processingLog <- processingLogAppend(res@processingLog, paste(deparse(match.call()), sep="", collapse=""))
    res
}
#' @title Plot Sealevel Data
#'
#' @description
#' Creates a plot for a sea-level dataset, in one of two varieties. Depending
#' on the length of `which`, either a single-panel or multi-panel plot is
#' drawn. If there is just one panel, then the value of `par` used in
#' `plot,sealevel-method` is retained upon exit, making it convenient to add to
#' the plot. For multi-panel plots, `par` is returned to the value it had
#' before the call.
#'
#' @param x a [sealevel-class] object.
#'
#' @param which a numerical or string vector indicating desired plot types,
#' with possibilities 1 or `"all"` for a time-series of all the elevations, 2 or
#' `"month"` for a time-series of just the first month, 3 or
#' `"spectrum"` for a power spectrum (truncated to frequencies below 0.1
#' cycles per hour, or 4 or `"cumulativespectrum"` for a cumulative
#' integral of the power spectrum.
#'
#' @param drawTimeRange boolean that applies to panels with time as the
#' horizontal axis, indicating whether to draw the time range in the top-left
#' margin of the plot.
#'
#' @param mgp 3-element numerical vector to use for [`par`]`("mgp")`, and also
#' for [`par`]`("mar")`, computed from this. The default is tighter than the R
#' default, in order to use more space for the data and less for the axes.
#'
#' @param mar value to be used with [`par`]`("mar")`.
#'
#' @param marginsAsImage boolean, `TRUE` to put a wide margin to the right
#' of time-series plots, matching the space used up by a palette in an
#' [imagep()] plot.
#'
#' @param debug a flag that turns on debugging, if it exceeds 0.
#'
#' @param \dots optional arguments passed to plotting functions.
#'
#' @return None.
#'
#' @author Dan Kelley
#'
#' @seealso The documentation for the [sealevel-class] class explains the
#' structure of sealevel objects, and also outlines the other functions dealing
#' with them.
#'
#' @section Historical Note:
#' Until 2020-02-06, sea-level plots had the mean value removed, and indicated
#' with a tick mark and margin note on the right-hand side of the plot.
#' This behaviour was confusing. The change did not go through the usual
#' deprecation process, because the margin-note behaviour had not
#' been documented.
#'
#' @references The example refers to Hurricane Juan, which caused a great deal
#' of damage to Halifax in 2003. Since this was in the era of the digital
#' photo, a casual web search will uncover some spectacular images of damage,
#' from both wind and storm surge. A map of the path of Hurricane Juan across
#' Nova Scotia is at
#' \url{http://ec.gc.ca/ouragans-hurricanes/default.asp?lang=En&n=222F51F7-1}.
#' Landfall, very near the site of this sealevel
#' gauge, was between 00:10 and 00:20 Halifax local time on Monday, Sept 29,
#' 2003.
#'
#' @examples
#' library(oce)
#' data(sealevel)
#' ## local Halifax time is UTC + 4h
#' juan <- as.POSIXct("2003-09-29 00:15:00", tz="UTC")+4*3600
#' plot(sealevel, which=1, xlim=juan+86400*c(-7, 7))
#' abline(v=juan, col='red')
#'
#' @family functions that plot oce data
#' @family things related to sealevel data
#'
#' @aliases plot.sealevel
setMethod(f="plot",
    signature=signature("sealevel"),
    definition=function(x, which=1:3,
                        drawTimeRange=getOption("oceDrawTimeRange"),
                        mgp=getOption("oceMgp"),
                        mar=c(mgp[1]+0.5, mgp[1]+1.5, mgp[2]+1, mgp[2]+3/4),
                        marginsAsImage=FALSE,
                        debug=getOption("oceDebug"),
                        ...)
    {
        oceDebug(debug, "plot.sealevel(..., mar=c(", paste(mar, collapse=", "), "), ...) {\n", sep="", unindent=1)
        # 'adorn' was removed in 2017; warn (do not error) so old scripts still run.
        if ("adorn" %in% names(list(...)))
            warning("In plot,adv-method() : the 'adorn' argument was removed in November 2017", call.=FALSE)
        # NOTE(review): 'dots' is captured but only referenced by the
        # commented-out ylim logic below; confirm before removing.
        dots <- list(...)
        # Write station number/name/region (and position, when known) in
        # the top margin of the current panel.
        titlePlot<-function(x)
        {
            title <- ""
            if (!is.null(x@metadata$stationNumber) || !is.null(x@metadata$stationName) || !is.null(x@metadata$region))
                title <- paste(title, gettext("Station ", domain="R-oce"),
                    if (!is.na(x@metadata$stationNumber)) x@metadata$stationNumber else "",
                    " ",
                    if (!is.null(x@metadata$stationName)) x@metadata$stationName else "",
                    " ",
                    if (!is.null(x@metadata$region)) x@metadata$region else "",
                    sep="")
            if (!is.na(x@metadata$latitude) && !is.na(x@metadata$longitude))
                title <- paste(title, latlonFormat(x@metadata$latitude, x@metadata$longitude), sep="")
            if (nchar(title) > 0)
                mtext(side=3, title, adj=1, cex=2/3)
        }
        # Mark a single tidal-constituent frequency (cycles/hour) on the
        # current spectrum panel, labelling it in the given margin.
        drawConstituent<-function(frequency=0.0805114007, label="M2", col="darkred", side=1)
        {
            abline(v=frequency, col=col)
            mtext(label, side=side, at=frequency, col=col, cex=3/4*par("cex"))
        }
        # Mark the major diurnal (O1, K1) and semidiurnal (N2, M2, S2)
        # constituents, alternating margins to reduce label overlap.
        drawConstituents<-function()
        {
            drawConstituent(0.0387306544, "O1", side=1)
            ##draw.constituent(0.0416666721, "S1", side=3)
            drawConstituent(0.0417807462, "K1", side=3)
            drawConstituent(0.0789992488, "N2", side=1)
            drawConstituent(0.0805114007, "M2", side=3)
            drawConstituent(0.0833333333, "S2", side=1)
        }
        if (!inherits(x, "sealevel"))
            stop("method is only for objects of class '", "sealevel", "'")
        opar <- par(no.readonly = TRUE)
        par(mgp=mgp, mar=mar)
        lw <- length(which)
        # Multi-panel layout when more than one plot type is requested;
        # optionally reserve a right-hand strip so panel widths match
        # those of imagep() plots that carry a palette.
        if (marginsAsImage) {
            scale <- 0.7
            w <- (1.5 + par("mgp")[2]) * par("csi") * scale * 2.54 + 0.5
            if (lw > 1)
                lay <- layout(matrix(1:(2*lw), nrow=lw, byrow=TRUE), widths=rep(c(1, lcm(w)), lw))
        } else {
            if (lw > 1)
                lay <- layout(cbind(1:lw))
        }
        # For multi-panel plots restore par() on exit; for a single panel
        # par() is deliberately retained so the caller can add to the plot
        # (see the roxygen description above).
        if (lw > 1) on.exit(par(opar))
        ## tidal constituents (in cpd):
        ## http://www.soest.hawaii.edu/oceanography/dluther/HOME/Tables/Kaw.htm
        num.NA <- sum(is.na(x@data$elevation))
        par(mgp=mgp)
        ##par(mar=c(mgp[1],mgp[1]+2.5,mgp[2]+0.5,mgp[2]+1))
        par(mar=mar)
        ##> MSL <- mean(x@data$elevation, na.rm=TRUE)
        ##> if ("xlim" %in% names(dots)) {
        ##> xtmp <- subset(x@data$elevation, dots$xlim[1] <= x@data$time & x@data$time <= dots$xlim[2])
        ##> tmp <- max(abs(range(xtmp-MSL, na.rm=TRUE)))
        ##> } else {
        ##> tmp <- max(abs(range(x@data$elevation-MSL, na.rm=TRUE)))
        ##> }
        ##> ylim <- c(-tmp, tmp)
        ##> oceDebug(debug, "ylim=", ylim, "\n")
        n <- length(x@data$elevation) # do not trust value in metadata
        oceDebug(debug, "which:", which, "\n")
        # Map string panel names onto the numeric codes handled below.
        which2 <- oce.pmatch(which, list(all=1, month=2, spectrum=3, cumulativespectrum=4))
        oceDebug(debug, "which2:", which2, "\n")
        for (w in seq_along(which2)) {
            oceDebug(debug, "plotting for code which2[", w, "] = ", which2[w], "\n", sep="")
            if (which2[w] == 1) {
                # Panel "all": time series of the full elevation record.
                plot(x@data$time, x@data$elevation,
                     xlab="",
                     ylab=resizableLabel("elevation"),
                     type='l', xaxs="i",
                     lwd=0.5, axes=FALSE, ...)
                tics <- oce.axis.POSIXct(1, x@data$time, drawTimeRange=drawTimeRange, cex.axis=1, debug=debug-1)
                box()
                titlePlot(x)
                yax <- axis(2)
                abline(h=yax, col="darkgray", lty="dotted")
                abline(v=tics, col="darkgray", lty="dotted")
                ##> abline(h=0, col="darkgreen")
                ##> mtext(side=4, text=sprintf("%.2f m", MSL), col="darkgreen", cex=2/3)
            } else if (which2[w] == 2) {
                ## sample month
                # Panel "month": time series of the first 28 days only.
                from <- trunc(x@data$time[1], "day")
                to <- from + 28 * 86400 # 28 days
                look <- from <= x@data$time & x@data$time <= to
                xx <- x
                for (i in seq_along(x@data)) {
                    xx@data[[i]] <- x@data[[i]][look]
                }
                if (any(is.finite(xx@data$elevation))) {
                    atWeek <- seq(from=from, to=to, by="week")
                    atDay <- seq(from=from, to=to, by="day")
                    # NOTE(review): 'tmp' is computed but unused below;
                    # confirm before removing.
                    tmp <- max(abs(range(xx@data$elevation, na.rm=TRUE)))
                    plot(xx@data$time, xx@data$elevation,
                         xlab="",
                         ylab=resizableLabel("elevation"),
                         type='l', xaxs="i",
                         axes=FALSE)
                    oce.axis.POSIXct(1, xx@data$time, drawTimeRange=drawTimeRange, cex.axis=1, debug=debug-1)
                    yax <- axis(2)
                    abline(h=yax, col="lightgray", lty="dotted")
                    box()
                    abline(v=atWeek, col="darkgray", lty="dotted")
                    abline(v=atDay, col="lightgray", lty="dotted")
                    ##> abline(h=0, col="darkgreen")
                    ##> mtext(side=4, text=sprintf("%.2f m", MSL), col="darkgreen", cex=2/3)
                } else {
                    plot(0:1, 0:1, type="n", xlab="", ylab="", axes=FALSE)
                    box()
                    text(0.5, 0.5, "Cannot show first month, since all data are NA then")
                }
            } else if (which2[w] == 3) { # "spectrum"
                # Panel "spectrum": power spectrum below 0.1 cph; drawn
                # only for gap-free records.
                if (num.NA == 0) {
                    Elevation <- ts(x@data$elevation, start=1, deltat=x@metadata$deltat)
                    ##s <- spectrum(Elevation-mean(Elevation),spans=c(5, 3),plot=FALSE,log="y",demean=TRUE,detrend=TRUE)
                    s <- spectrum(Elevation-mean(Elevation), plot=FALSE, log="y", demean=TRUE, detrend=TRUE)
                    par(mar=c(mgp[1]+1.25, mgp[1]+1.5, mgp[2]+0.25, mgp[2]+3/4))
                    xlim <- c(0, 0.1) # FIXME: should be able to set this
                    ylim <- range(subset(s$spec, xlim[1] <= s$freq & s$freq <= xlim[2]))
                    plot(s$freq, s$spec, xlim=xlim, ylim=ylim,
                         xlab=resizableLabel("frequency cph"),
                         ylab=resizableLabel("spectral density m2/cph"),
                         #[m^2/cph]",
                         type='l', log="y")
                    grid()
                    drawConstituents()
                } else {
                    plot(0:1, 0:1, type="n", xlab="", ylab="", axes=FALSE)
                    box()
                    text(0.5, 0.5, "Some elevations are NA, so cannot calculate the spectrum")
                }
            } else if (which2[w] == 4) { # "cumulativespectrum"
                # Panel "cumulativespectrum": square root of the cumulative
                # spectral sum; again requires a gap-free record.
                if (num.NA == 0) {
                    n <- length(x@data$elevation)
                    Elevation <- ts(x@data$elevation, start=1, deltat=x@metadata$deltat)
                    s <- spectrum(Elevation-mean(Elevation), plot=FALSE, log="y", demean=TRUE, detrend=TRUE)
                    nCumSpec <- length(s$spec)
                    cumSpec <- sqrt(cumsum(s$spec) / nCumSpec)
                    ##e <- x@data$elevation - mean(x@data$elevation)
                    par(mar=c(mgp[1]+1.25, mgp[1]+2.5, mgp[2]+0.25, mgp[2]+0.25))
                    plot(s$freq, cumSpec,
                         xlab=resizableLabel("frequency cph"),
                         ylab=expression(paste(integral(Gamma, 0, f), " df [m]")),
                         type='l', xlim=c(0, 0.1))
                    grid()
                    drawConstituents()
                } else {
                    # NOTE(review): "spectum" typo in this runtime string;
                    # left untouched here because this edit is doc-only.
                    warning("cannot draw sealevel spectum, because the series contains missing values")
                }
            } else {
                stop("unrecognized value of which: ", which[w])
            }
            if (marginsAsImage) {
                ## blank plot, to get axis length same as for images
                omar <- par("mar")
                par(mar=c(mar[1], 1/4, mgp[2]+1/2, mgp[2]+1))
                plot(1:2, 1:2, type='n', axes=FALSE, xlab="", ylab="")
                par(mar=omar)
            }
        }
        oceDebug(debug, "} # plot.sealevel()\n", unindent=1)
        invisible()
    })
#' Read a Sealevel File
#'
#' Read a data file holding sea level data. BUG: the time vector assumes GMT,
#' regardless of the GMT.offset value.
#'
#' This function starts by scanning the first line of the file, from which it
#' determines whether the file is in one of two known formats: type 1, the
#' format used at the Hawaii archive centre, and type 2, the
#' comma-separated-value format used by the Marine Environmental Data Service.
#' The file type is inferred by examination of its first line. If that contains
#' the string `Station_Name` the file is of type 2. If
#' the file is in neither of these formats, the user might wish to scan it
#' directly, and then to use [as.sealevel()] to create a
#' `sealevel` object.
#' The Hawaii archive site at
#' `http://ilikai.soest.hawaii.edu/uhslc/datai.html` at one time provided a graphical
#' interface for downloading sealevel data in Type 1, with format that was once
#' described at `http://ilikai.soest.hawaii.edu/rqds/hourly.fmt` (although that link
#' was observed to no longer work, on December 4, 2016).
#' Examination of data retrieved from what seems to be a replacement Hawaii server
#' (https://uhslc.soest.hawaii.edu/data/?rq) in September 2019 indicated that the
#' format had been changed to what is called Type 3 by `read.sealevel`.
#' Web searches did not uncover documentation on this format, so the
#' decoding scheme was developed solely through examination of
#' data files, which means that it might not be correct.
#' The MEDS repository (\url{http://www.isdm-gdsi.gc.ca/isdm-gdsi/index-eng.html})
#' provides Type 2 data.
#'
#' @param file a connection or a character string giving the name of the file
#' to load. See Details for the types of files that are recognized.
#'
#' @param tz time zone. The default value, `oceTz`, is set to `UTC`
#' at setup. (If a time zone is present in the file header, this will
#' supersede the value given here.)
#'
#' @param processingLog if provided, the action item to be stored in the log.
#' (Typically only provided for internal calls; the default that it provides is
#' better for normal calls by a user.)
#' @template debugTemplate
#'
#' @return A [sealevel-class] object.
#'
#' @author Dan Kelley
#'
#' @family things related to sealevel data
read.sealevel <- function(file, tz=getOption("oceTz"), processingLog, debug=getOption("oceDebug"))
{
    # Read a sea-level data file in one of three formats (see roxygen
    # block above): type 1 is the MEDS CSV format (first line starts with
    # "Station_Name"), type 3 is the newer Hawaii format (header contains
    # LAT=/LONG=/TIMEZONE), and type 2 is the older fixed-width Hawaii
    # format.  Returns a sealevel-class object.
    oceDebug(debug, "read.sealevel(file=\"", file, "\", ...) {\n", sep="", unindent=1)
    if (!missing(file) && is.character(file) && 0 == file.info(file)$size)
        stop("empty file")
    filename <- "?"
    if (is.character(file)) {
        filename <- fullFilename(file)
        file <- file(file, "r")
        on.exit(close(file))
    }
    if (!inherits(file, "connection"))
        stop("argument `file' must be a character string or connection")
    if (!isOpen(file)) {
        filename <- "(connection)"
        open(file, "r")
        on.exit(close(file))
    }
    fileOrig <- file
    # Peek at the first line to decide the format, then push it back so
    # each branch can re-read from the top of the stream.
    firstLine <- readLines(file, n=1, encoding="UTF-8")
    header <- firstLine
    oceDebug(debug, "header (first line in file): '", header, "'\n", sep="")
    pushBack(firstLine, file)
    stationNumber <- NA
    stationVersion <- NA
    stationName <- NULL
    region <- NULL
    year <- NA
    latitude <- NA
    longitude <- NA
    GMTOffset <- NA
    decimationMethod <- NA
    referenceOffset <- NA
    referenceCode <- NA
    res <- new('sealevel')
    if (substr(firstLine, 1, 12) == "Station_Name") {
        oceDebug(debug, "File is of format 1 (e.g. as in MEDS archives)\n")
        ## Station_Name,HALIFAX
        ## Station_Number,490
        ## Latitude_Decimal_Degrees,44.666667
        ## Longitude_Decimal_Degrees,63.583333
        ## Datum,CD
        ## Time_Zone,AST
        ## SLEV=Observed Water Level
        ## Obs_date,SLEV
        ## 01/01/2001 12:00 AM,1.82,
        headerLength <- 8
        header <- readLines(file, n = headerLength)
        if (debug > 0) {
            print(header)
        }
        stationName <- strsplit(header[1], ",")[[1]][2]
        stationNumber <- as.numeric(strsplit(header[2], ",")[[1]][2])
        latitude <- as.numeric(strsplit(header[3], ",")[[1]][2])
        longitude <- as.numeric(strsplit(header[4], ",")[[1]][2])
        tz <- strsplit(header[6], ",")[[1]][2] # needed for get GMT offset
        GMTOffset <- GMTOffsetFromTz(tz)
        oceDebug(debug, "about to read data\n")
        x <- read.csv(file, header=FALSE, stringsAsFactors=FALSE)#, skip=headerLength)
        oceDebug(debug, "... finished reading data\n")
        # Two observed date styles: "YYYY/MM/DD HH:MM" (24-hour clock) or
        # "DD/MM/YYYY HH:MM AM/PM" (12-hour clock).
        if (length(grep("[0-9]{4}/", x$V1[1])) > 0) {
            oceDebug(debug, "Date format is year/month/day hour:min with hour in range 1:24\n")
            time <- strptime(as.character(x$V1), "%Y/%m/%d %H:%M", "UTC") + 3600 * GMTOffset
        } else {
            oceDebug(debug, "Date format is day/month/year hour:min AMPM with hour in range 1:12 and AMPM indicating whether day or night\n")
            time <- strptime(as.character(x$V1), "%d/%m/%Y %I:%M %p", "UTC") + 3600 * GMTOffset
        }
        elevation <- as.numeric(x$V2)
        oceDebug(debug, "tz=", tz, "so GMTOffset=", GMTOffset, "\n",
                 "first pass has time string:", as.character(x$V1)[1], "\n",
                 "first pass has time start:", format(time[1]), " ", attr(time[1], "tzone"), "\n")
        year <- as.POSIXlt(time[1])$year + 1900
    } else {
        oceDebug(debug, "File is of type 2 or 3\n")
        d <- readLines(file)
        n <- length(d)
        header <- d[1]
        if (grepl("LAT=", header) && grepl("LONG=", header) && grepl("TIMEZONE", header)) {
            ## URL
            ## http://uhslc.soest.hawaii.edu/woce/h275.dat
            ## is a sample file, which starts as below (with quote marks added):
            ## '275HALIFAX 1895 LAT=44 40.0N LONG=063 35.0W TIMEZONE=GMT '
            ## '275HALIFAX 189501011 9999 9999 9999 9999 9999 9999 9999 9999 9999 9999 9999 9999'
            ## '275HALIFAX 189501012 9999 9999 9999 9999 9999 9999 9999 9999 9999 9999 9999 9999'
            oceDebug(debug, "type 3 (format inferred/guessed from e.g. http://uhslc.soest.hawaii.edu/woce/h275.dat)\n")
            stationNumber <- strtrim(header, 3)
            oceDebug(debug, " stationNumber='", stationNumber, "'\n", sep="")
            # Position is given as "DD MM.M<H>" with hemisphere letter.
            longitudeString <- gsub("^.*LONG=([ 0-9.]*[EWew]).*$", "\\1", header)
            latitudeString <- gsub("^.*LAT=([ 0-9.]*[NSns]).*$", "\\1", header)
            oceDebug(debug, " longitudeString='", longitudeString, "'\n", sep="")
            oceDebug(debug, " latitudeString='", latitudeString, "'\n", sep="")
            longitudeSplit <- strsplit(longitudeString, split="[ \t]+")[[1]]
            longitudeDegree <- longitudeSplit[1]
            longitudeMinute <- longitudeSplit[2]
            oceDebug(debug, " longitudeDegree='", longitudeDegree, "'\n", sep="")
            oceDebug(debug, " longitudeMinute='", longitudeMinute, "'\n", sep="")
            longitudeSign <- if (grepl("[wW]", longitudeMinute)) -1 else 1
            oceDebug(debug, " longitudeSign=", longitudeSign, "\n")
            longitudeMinute <- gsub("[EWew]", "", longitudeMinute)
            oceDebug(debug, " longitudeMinute='", longitudeMinute, "' after removing EW suffix\n", sep="")
            longitude <- longitudeSign * (as.numeric(longitudeDegree) + as.numeric(longitudeMinute)/60)
            oceDebug(debug, " longitude=", longitude, "\n")
            latitudeSplit <- strsplit(latitudeString, split="[ \t]+")[[1]]
            latitudeDegree <- latitudeSplit[1]
            latitudeMinute <- latitudeSplit[2]
            latitudeSign <- if (grepl("[sS]", latitudeMinute)) -1 else 1
            oceDebug(debug, " latitudeSign=", latitudeSign, "\n")
            latitudeMinute <- gsub("[SNsn]", "", latitudeMinute)
            oceDebug(debug, " latitudeMinute='", latitudeMinute, "' after removing NS suffix\n", sep="")
            latitude <- latitudeSign * (as.numeric(latitudeDegree) + as.numeric(latitudeMinute)/60)
            oceDebug(debug, " latitude=", latitude, "\n")
            ## Remove interspersed year boundaries (which look like the first line).
            d2 <- d[!grepl("LAT=.*LONG=", d)]
            start <- 1 + which(strsplit(header, "")[[1]]==" ")[1]
            d3 <- substr(d2, start, 1000L)
            ## Fix problem where the month is sometimes e.g. ' 1' instead of '01'
            d4 <- gsub("^([1-9][0-9]{3}) ", "\\10", d3)
            ## Fix problem where the day is sometimes e.g. ' 1' instead of '01'
            d5 <- gsub("^([1-9][0-9]{3}[0-9]{2}) ", "\\10", d4)
            n <- length(d5)
            ## Now we have as below. But the second block sometimes has ' ' for '0', so we
            ## need to fix that.
            ## '275HALIFAX 189501011 9999 9999 9999 9999 9999 9999 9999 9999 9999 9999 9999 9999'
            # Each data line holds 12 hourly values; a trailing digit on the
            # date field says whether it is the first or second half-day.
            twelve <- seq(1, 12, 1)
            elevation <- rep(NA, 12 * n)
            time <- rep(NA, 12 * n)
            lastDayPortion <- NULL # value defined at i=1, checked at i>2, so the initial value is immaterial
            for (i in 1:n) {
                sp <- strsplit(d5[i], "[ ]+")[[1]]
                target.index <- 12 * (i-1) + twelve
                ## oceDebug(debug, " i=", i, ", length(sp)=", length(sp), "\n")
                if (length(sp) != 13) {
                    stop("cannot parse tokens on line '", d2[i], "'\n", sep="")
                }
                elevation[target.index] <- as.numeric(sp[2:13])
                dayPortion <- as.numeric(substr(sp[1], 9, 9))
                if (i == 1) {
                    startDay <- as.POSIXct(strptime(paste(substr(sp[1], 1, 8), "00:00:00"), "%Y%m%d"), tz=tz)
                    oceDebug(debug, " startDay=", startDay, "\n")
                } else {
                    # Half-day records must alternate 1,2,1,2,...
                    if (dayPortion == 1) {
                        if (i > 2 && lastDayPortion != 2)
                            stop("non-alternating day portions on data line ", i)
                    } else if (dayPortion == 2) {
                        if (i > 2 && lastDayPortion != 1)
                            stop("non-alternating day portions on data line ", i)
                    } else {
                        stop("day portion is ", dayPortion, " but must be 1 or 2, on data line", i)
                    }
                }
                lastDayPortion <- dayPortion
                time[target.index] <- as.POSIXct(sp[1], format="%Y%m%d", tz="UTC")+3600*(seq(0, 11) + 12 * (dayPortion-1))
            }
            elevation[elevation==9999] <- NA # 9999 marks missing data
            elevation <- elevation / 1000 # convert mm to m
            time <- numberAsPOSIXct(time, tz="UTC") # guess on timezone
        } else {
            oceDebug(debug, "type 2 (an old Hawaii format, inferred from documentation)\n")
            # Fixed-width header fields; column positions are from the old
            # hourly.fmt documentation cited in the roxygen block.
            stationNumber <- substr(header, 1, 3)
            stationVersion <- substr(header, 4, 4)
            stationName <- substr(header, 6, 23)
            stationName <- sub("[ ]*$", "", stationName)
            region <- substr(header, 25, 43)
            region <- sub("[ ]*$", "", region)
            year <- substr(header, 45, 48)
            latitudeStr <- substr(header, 50, 55) #degrees,minutes,tenths,hemisphere
            latitude <- as.numeric(substr(latitudeStr, 1, 2)) + (as.numeric(substr(latitudeStr, 3, 5)))/600
            if (tolower(substr(latitudeStr, 6, 6)) == "s") latitude <- -latitude
            longitudeStr <- substr(header, 57, 63) #degrees,minutes,tenths,hemisphere
            longitude <- as.numeric(substr(longitudeStr, 1, 3)) + (as.numeric(substr(longitudeStr, 4, 6)))/600
            if (tolower(substr(longitudeStr, 7, 7)) == "w") longitude <- -longitude
            GMTOffset <- substr(header, 65, 68) #hours,tenths (East is +ve)
            oceDebug(debug, "GMTOffset=", GMTOffset, "\n")
            decimationMethod <- substr(header, 70, 70) #1=filtered 2=average 3=spot readings 4=other
            referenceOffset <- substr(header, 72, 76) # add to values
            referenceCode <- substr(header, 77, 77) # add to values
            units <- substr(header, 79, 80)
            oceDebug(debug, "units=", units, "\n")
            if (nchar(units) == 0) {
                warning("no units can be inferred from the file, so assuming 'mm'")
                # BUG FIX: the assumption announced by the warning was never
                # applied, so the units check at the end of this branch
                # stopped with an error anyway.  Make the assumption real.
                units <- "mm"
            } else {
                if (units != "mm" && units != "MM")
                    stop("require units to be 'mm' or 'MM', not '", units, "'")
            }
            elevation <- array(NA_real_, 12 * (n-1))
            ## first.twelve.hours <- 3600 * (0:11)
            ## second.twelve.hours <- 3600 * (12:23)
            twelve <- seq(1, 12, 1)
            lastDayPortion <- -1 # ignored; prevents undefined warning in code analysis
            for (i in 2:n) {
                sp <- strsplit(d[i], "[ ]+")[[1]]
                target.index <- 12 * (i-2) + twelve
                elevation[target.index] <- as.numeric(sp[4:15])
                dayPortion <- as.numeric(substr(sp[3], 9, 9))
                if (i == 2) {
                    startDay <- as.POSIXct(strptime(paste(substr(sp[3], 1, 8), "00:00:00"), "%Y%m%d"), tz=tz)
                } else {
                    # Half-day records must alternate 1,2,1,2,...
                    if (dayPortion == 1) {
                        if (i > 2 && lastDayPortion != 2)
                            stop("non-alternating day portions on data line ", i)
                    } else if (dayPortion == 2) {
                        if (i > 2 && lastDayPortion != 1)
                            stop("non-alternating day portions on data line ", i)
                    } else {
                        stop("day portion is ", dayPortion, " but must be 1 or 2, on data line", i)
                    }
                }
                lastDayPortion <- dayPortion
            }
            time <- as.POSIXct(startDay + 3600 * (seq(0, 12 * (n-1)-1)), tz=tz)
            elevation[elevation==9999] <- NA # 9999 marks missing data
            if (tolower(units) == "mm") {
                elevation <- elevation / 1000
            } else {
                stop("require units to be MM")
            }
        }
    }
    ##num.missing <- sum(is.na(elevation))
    ##if (num.missing > 0) warning("there are ", num.missing, " missing points in this timeseries, at indices ", paste(which(is.na(elevation)), ""))
    res@metadata$filename <- filename
    res@metadata$header <- header
    res@metadata$year <- year
    res@metadata$stationNumber <- stationNumber
    res@metadata$stationVersion <- stationVersion
    res@metadata$stationName <- stationName
    res@metadata$region <- region
    res@metadata$latitude <- latitude
    res@metadata$longitude <- longitude
    res@metadata$GMTOffset <- GMTOffset
    res@metadata$decimationMethod <- decimationMethod
    res@metadata$referenceOffset <- referenceOffset
    res@metadata$referenceCode <- referenceCode
    res@metadata$units <- list(elevation=list(unit=expression(m), scale=""))
    res@metadata$n <- length(time)
    ## deltat is in hours
    res@metadata$deltat <- if (res@metadata$n > 1) (as.numeric(time[2]) - as.numeric(time[1])) / 3600 else 0
    # BUG FIX: the documented 'processingLog' argument used to be computed
    # here (with a malformed string, missing the closing '")') and then
    # silently ignored by the append below.  Honor it, defaulting to a
    # well-formed record of this call.
    if (missing(processingLog))
        processingLog <- paste('read.sealevel(file="', fileOrig, '", tz="', tz, '")', sep="", collapse="")
    res@data$elevation <- elevation
    res@data$time <- time
    res@processingLog <- processingLogAppend(res@processingLog, processingLog)
    res
}
| /issuestests/oce/R/sealevel.R | no_license | akhikolla/RcppDeepStateTest | R | false | false | 42,135 | r | #' Class to Store Sealevel Data
#'
#' This class stores sealevel data, e.g. from a tide gauge.
#'
#' @templateVar class sealevel
#'
#' @templateVar dataExample The key items stored in this slot are `time` and `elevation`.
#'
#' @templateVar metadataExample An example of the former might be the location at which a `sealevel` measurement was made, stored in `longitude` and `latitude`, and of the latter might be `filename`, the name of the data source.
#'
#' @template slot_summary
#'
#' @template slot_put
#'
#' @template slot_get
#'
#' @author Dan Kelley
#'
#' @family classes provided by oce
#' @family things related to sealevel data
setClass("sealevel", contains="oce") # slots (metadata, data, processingLog) are inherited from the oce parent class
#' @title Sealevel data for Halifax Harbour
#'
#' @description
#' This sample sea-level dataset is the 2003 record from Halifax Harbour in
#' Nova Scotia, Canada. For reasons that are not mentioned on the data archive
#' website, the record ends on the 8th of October.
#'
#' @name sealevel
#'
#' @docType data
#'
#' @author Dan Kelley
#'
#' @source The data were created as \preformatted{ sealevel <-
#' read.oce("490-01-JAN-2003_slev.csv") sealevel <- oce.edit(sealevel,
#' "longitude", -sealevel[["longitude"]], reason="Fix longitude hemisphere") }
#' where the csv file was downloaded from reference 1. Note the correction of longitude
#' sign, which is required because the data file has no indication that this is
#' the western hemisphere.
#'
#' @references
#' 1. Fisheries and Oceans Canada \url{http://www.meds-sdmm.dfo-mpo.gc.ca/isdm-gdsi/index-eng.html}
#'
#' @family datasets provided with oce
#' @family things related to sealevel data
NULL
#' Sea-level data set acquired in 1975 at Tuktoyaktuk
#'
#' This sea-level dataset is provided with in Appendix 7.2 of Foreman (1977)
#' and also with the `T_TIDE` package (Pawlowicz et al., 2002). It results
#' from measurements made in 1975 at Tuktoyaktuk, Northwest Territories,
#' Canada.
#'
#' The data set contains 1584 points, some of which have NA for sea-level
#' height.
#'
#' Although Foreman's Appendix 7.2 states that times are in Mountain standard
#' time, the timezone is set to `UTC` in the present case, so that the
#' results will be similar to those he provides in his Appendix 7.3.
#'
#' @name sealevelTuktoyaktuk
#'
#' @docType data
#'
#' @references Foreman, M. G. G., 1977. Manual for tidal heights analysis and
#' prediction. Pacific Marine Science Report 77-10, Institute of Ocean
#' Sciences, Patricia Bay, Sidney, BC, 58pp.
#'
#' Pawlowicz, Rich, Bob Beardsley, and Steve Lentz, 2002. Classical tidal
#' harmonic analysis including error estimates in MATLAB using `T_TIDE`.
#' Computers and Geosciences, 28, 929-937.
#'
#' @source The data were based on the `T_TIDE` dataset, which in turn
#' seems to be based on Appendix 7.2 of Foreman (1977). Minor editing was on
#' file format, and then the `sealevelTuktoyaktuk` object was created
#' using [as.sealevel()].
#'
#' @examples
#'\donttest{
#' library(oce)
#' data(sealevelTuktoyaktuk)
#' time <- sealevelTuktoyaktuk[["time"]]
#' elevation <- sealevelTuktoyaktuk[["elevation"]]
#' oce.plot.ts(time, elevation, type='l', ylab="Height [m]", ylim=c(-2, 6))
#' legend("topleft", legend=c("Tuktoyaktuk (1975)","Detided"),
#' col=c("black","red"),lwd=1)
#' tide <- tidem(sealevelTuktoyaktuk)
#' detided <- elevation - predict(tide)
#' lines(time, detided, col="red")
#'}
#'
#' @section Historical note:
#' Until Jan 6, 2018, the time in this dataset had been increased
#' by 7 hours. However, this alteration was removed on this date,
#' to make for simpler comparison of amplitude and phase output with
#' the results obtained by Foreman (1977) and Pawlowicz et al. (2002).
#'
#' @family datasets provided with oce
#' @family things related to sealevel data
NULL
setMethod(f="initialize",
    signature="sealevel",
    definition=function(.Object, elevation, time, ...) {
        # Let the parent (oce) initializer set up the standard slots first.
        .Object <- callNextMethod(.Object, ...)
        # Store whichever data were supplied, keeping 'elevation' before
        # 'time' so the data slot has the conventional element order.
        if (!missing(elevation)) {
            .Object@data$elevation <- elevation
        }
        if (!missing(time)) {
            .Object@data$time <- time
        }
        # Record the creation event in the processing log.
        .Object@processingLog$time <- presentTime()
        .Object@processingLog$value <- "create 'sealevel' object"
        .Object
    })
#' @title Summarize a Sealevel Object
#'
#' @description
#' Summarizes some of the data in a sealevel object.
#'
#' @param object A [sealevel-class] object.
#'
#' @param \dots further arguments passed to or from other methods.
#'
#' @return A matrix containing statistics of the elements of the `data`
#' slot.
#'
#' @author Dan Kelley
#'
#' @examples
#' library(oce)
#' data(sealevel)
#' summary(sealevel)
#'
#' @family things related to sealevel data
setMethod(f="summary",
    signature="sealevel",
    definition=function(object, ...) {
        # Print a human-readable overview of station metadata plus counts
        # of total and non-missing elevation samples, then delegate to the
        # inherited oce summary (returned invisibly).
        cat("Sealevel Summary\n----------------\n\n")
        showMetadataItem(object, "stationNumber", "number: ")
        # BUG FIX: both as.sealevel() and read.sealevel() store this item
        # under "stationVersion", but "version" was looked up here, so the
        # station version was never displayed.
        showMetadataItem(object, "stationVersion", "version: ")
        showMetadataItem(object, "stationName", "name: ")
        showMetadataItem(object, "region", "region: ")
        showMetadataItem(object, "deltat", "sampling delta-t: ")
        cat("* Location: ", latlonFormat(object@metadata$latitude,
                                         object@metadata$longitude,
                                         digits=5), "\n")
        showMetadataItem(object, "year", "year: ")
        ndata <- length(object@data$elevation)
        cat("* number of observations: ", ndata, "\n")
        cat("* \" non-missing: ", sum(!is.na(object@data$elevation)), "\n")
        invisible(callNextMethod()) # summary
    })
#' @title Subset a Sealevel Object
#'
#' @description
#' This function is somewhat analogous to [subset.data.frame()], but
#' subsetting is only permitted by time.
#'
#' @param x a [sealevel-class] object.
#'
#' @param subset a condition to be applied to the `data` portion of
#' `x`.
#'
#' @param \dots ignored.
#'
#' @return A new `sealevel` object.
#'
#' @author Dan Kelley
#'
#' @examples
#' library(oce)
#' data(sealevel)
#' plot(sealevel)
#' plot(subset(sealevel, time < mean(range(sealevel[['time']]))))
#'
#' @family things related to sealevel data
#' @family functions that subset oce objects
setMethod(f="subset",
    signature="sealevel",
    definition=function(x, subset, ...) {
        # Return a new sealevel object whose data vectors keep only the
        # entries for which the unquoted 'subset' expression (evaluated
        # within the data slot, e.g. time < someCutoff) is TRUE; entries
        # where the condition evaluates to NA are dropped.  Metadata and
        # the processing log are copied, and the call is logged.
        res <- new("sealevel")
        res@metadata <- x@metadata
        res@processingLog <- x@processingLog
        for (i in seq_along(x@data)) {
            # Evaluate with the data slot as environment; parent.frame(2)
            # presumably reaches past S4 dispatch to the caller's frame so
            # the expression can also use the caller's variables — confirm
            # the depth before changing this.
            r <- eval(substitute(subset), x@data, parent.frame(2))
            r <- r & !is.na(r)
            res@data[[i]] <- x@data[[i]][r]
        }
        names(res@data) <- names(x@data)
        subsetString <- paste(deparse(substitute(subset)), collapse=" ")
        res@processingLog <- processingLogAppend(res@processingLog, paste("subset.sealevel(x, subset=", subsetString, ")", sep=""))
        res
    })
#' @title Extract Something From a Sealevel Object
#'
#' @param x a [sealevel-class] object.
#'
#' @template sub_subTemplate
#'
#' @family things related to sealevel data
setMethod(f="[[",
signature(x="sealevel", i="ANY", j="ANY"),
definition=function(x, i, j, ...) {
## Pure pass-through: delegate lookup of item 'i' to the inherited
## oce-class [[ method.
callNextMethod() # [[
})
#' @title Replace Parts of a Sealevel Object
#'
#' @param x a [sealevel-class] object.
#'
#' @template sub_subsetTemplate
#'
#' @family things related to sealevel data
setMethod(f="[[<-",
signature(x="sealevel", i="ANY", j="ANY"),
definition=function(x, i, j, ..., value) {
## Pure pass-through: delegate the assignment to the inherited
## oce-class [[<- method, forwarding all arguments unchanged.
callNextMethod(x=x, i=i, j=j, ...=..., value=value) # [[<-
})
setValidity("sealevel",
    function(object) {
        ## A sealevel object is valid only if every vector in its data slot
        ## (e.g. time and elevation) has the same length.
        ##
        ## BUG FIX: the previous test used var() on the vector of lengths,
        ## which is NA when the data slot holds a single element (so
        ## 'if (NA)' errored), and its '1:ndata' loop failed for an empty
        ## data slot. Comparing the set of distinct lengths handles both.
        lens <- lengths(object@data)
        if (length(unique(lens)) > 1) {
            cat("lengths of data elements are unequal\n")
            return(FALSE)
        } else {
            return(TRUE)
        }
    })
#' Coerce Data Into a Sealevel Object
#'
#' Coerces a dataset (minimally, a sequence of times and heights) into a
#' sealevel dataset.
#' The arguments are based on the standard data format, as were described in a
#' file formerly available at reference 1.
#'
#' @param elevation a list of sea-level heights in metres, in an hourly
#' sequence.
#'
#' @param time optional list of times, in POSIXct format. If missing, the list
#' will be constructed assuming hourly samples, starting at 0000-01-01
#' 00:00:00.
#'
#' @param header a character string as read from first line of a standard data
#' file.
#'
#' @param stationNumber three-character string giving station number.
#'
#' @param stationVersion single character for version of station.
#'
#' @param stationName the name of station (at most 18 characters).
#'
#' @param region the name of the region or country of station (at most 19
#' characters).
#'
#' @param year the year of observation.
#'
#' @param longitude the longitude in decimal degrees, positive east of
#' Greenwich.
#'
#' @param latitude the latitude in decimal degrees, positive north of the
#' equator.
#'
#' @param GMTOffset offset from GMT, in hours.
#'
#' @param decimationMethod a coded value, with 1 meaning filtered, 2 meaning a
#' simple average of all samples, 3 meaning spot readings, and 4 meaning some
#' other method.
#'
#' @param referenceOffset ?
#'
#' @param referenceCode ?
#'
#' @param deltat optional interval between samples, in hours (as for the
#' [ts()] timeseries function). If this is not provided, and `t`
#' can be understood as a time, then the difference between the first two times
#' is used. If this is not provided, and `t` cannot be understood as a
#' time, then 1 hour is assumed.
#'
#' @return A [sealevel-class] object (for details, see [read.sealevel()]).
#'
#' @author Dan Kelley
#'
#' @seealso The documentation for the [sealevel-class] class explains the
#' structure of sealevel objects, and also outlines the other functions dealing
#' with them.
#'
#' @references `http://ilikai.soest.hawaii.edu/rqds/hourly.fmt` (this link
#' worked for years but failed at least temporarily on December 4, 2016).
#'
#' @examples
#' library(oce)
#'
#' # Construct a year of M2 tide, starting at the default time
#' # 0000-01-01T00:00:00.
#' h <- seq(0, 24*365)
#' elevation <- 2.0 * sin(2*pi*h/12.4172)
#' sl <- as.sealevel(elevation)
#' summary(sl)
#'
#' # As above, but start at the Y2K time.
#' time <- as.POSIXct("2000-01-01") + h * 3600
#' sl <- as.sealevel(elevation, time)
#' summary(sl)
#' @family things related to sealevel data
as.sealevel <- function(elevation,
                        time,
                        header=NULL,
                        stationNumber=NA,
                        stationVersion=NA,
                        stationName=NULL,
                        region=NULL,
                        year=NA,
                        longitude=NA, latitude=NA,
                        GMTOffset=NA,
                        decimationMethod=NA,
                        referenceOffset=NA,
                        referenceCode=NA,
                        deltat)
{
    ## Coerce a numeric vector of sea-level heights (metres), plus optional
    ## times and station metadata, into a sealevel-class object. If 'time'
    ## is missing, an hourly sequence starting at 0000-01-01 00:00:00 UTC
    ## is constructed.
    if (missing(elevation))
        stop("must supply sealevel height, elevation, in metres")
    if (inherits(elevation, "POSIXt"))
        stop("elevation must be a numeric vector, not a time vector")
    res <- new('sealevel')
    n <- length(elevation)
    if (missing(time)) {
        ## Construct hourly times from time "zero". seq_len() keeps this
        ## well-defined even for zero-length elevation.
        start <- as.POSIXct("0000-01-01 00:00:00", tz="UTC")
        time <- as.POSIXct(start + 3600 * (seq_len(n) - 1), tz="UTC")
        if (is.na(GMTOffset))
            GMTOffset <- 0 # FIXME: do I want to do this?
    } else {
        time <- as.POSIXct(time, tz="UTC")
    }
    ## Infer the sampling interval (in hours) from the first two samples;
    ## fall back to 1 hour if it cannot be determined.
    if (missing(deltat))
        deltat <- as.numeric(difftime(time[2], time[1], units="hours"))
    if (is.na(deltat) || deltat <= 0)
        deltat <- 1
    res@metadata$filename <- ""
    res@metadata$header <- header
    res@metadata$year <- year
    res@metadata$stationNumber <- stationNumber
    res@metadata$stationVersion <- stationVersion
    res@metadata$stationName <- stationName
    res@metadata$region <- region
    res@metadata$latitude <- latitude
    res@metadata$longitude <- longitude
    res@metadata$GMTOffset <- GMTOffset
    res@metadata$decimationMethod <- decimationMethod
    res@metadata$referenceOffset <- referenceOffset
    res@metadata$referenceCode <- referenceCode
    res@metadata$units <- list(elevation=list(unit=expression(m), scale=""))
    ## BUG FIX: this was length(t), which measured the base transpose
    ## *function* (always 1) rather than the time vector.
    res@metadata$n <- length(time)
    res@metadata$deltat <- deltat
    res@data$elevation <- elevation
    res@data$time <- time
    res@processingLog <- processingLogAppend(res@processingLog, paste(deparse(match.call()), sep="", collapse=""))
    res
}
#' @title Plot Sealevel Data
#'
#' @description
#' Creates a plot for a sea-level dataset, in one of two varieties. Depending
#' on the length of `which`, either a single-panel or multi-panel plot is
#' drawn. If there is just one panel, then the value of `par` used in
#' `plot,sealevel-method` is retained upon exit, making it convenient to add to
#' the plot. For multi-panel plots, `par` is returned to the value it had
#' before the call.
#'
#' @param x a [sealevel-class] object.
#'
#' @param which a numerical or string vector indicating desired plot types,
#' with possibilities 1 or `"all"` for a time-series of all the elevations, 2 or
#' `"month"` for a time-series of just the first month, 3 or
#' `"spectrum"` for a power spectrum (truncated to frequencies below 0.1
#' cycles per hour, or 4 or `"cumulativespectrum"` for a cumulative
#' integral of the power spectrum.
#'
#' @param drawTimeRange boolean that applies to panels with time as the
#' horizontal axis, indicating whether to draw the time range in the top-left
#' margin of the plot.
#'
#' @param mgp 3-element numerical vector to use for [`par`]`("mgp")`, and also
#' for [`par`]`("mar")`, computed from this. The default is tighter than the R
#' default, in order to use more space for the data and less for the axes.
#'
#' @param mar value to be used with [`par`]`("mar")`.
#'
#' @param marginsAsImage boolean, `TRUE` to put a wide margin to the right
#' of time-series plots, matching the space used up by a palette in an
#' [imagep()] plot.
#'
#' @param debug a flag that turns on debugging, if it exceeds 0.
#'
#' @param \dots optional arguments passed to plotting functions.
#'
#' @return None.
#'
#' @author Dan Kelley
#'
#' @seealso The documentation for the [sealevel-class] class explains the
#' structure of sealevel objects, and also outlines the other functions dealing
#' with them.
#'
#' @section Historical Note:
#' Until 2020-02-06, sea-level plots had the mean value removed, and indicated
#' with a tick mark and margin note on the right-hand side of the plot.
#' This behaviour was confusing. The change did not go through the usual
#' deprecation process, because the margin-note behaviour had not
#' been documented.
#'
#' @references The example refers to Hurricane Juan, which caused a great deal
#' of damage to Halifax in 2003. Since this was in the era of the digital
#' photo, a casual web search will uncover some spectacular images of damage,
#' from both wind and storm surge. A map of the path of Hurricane Juan across
#' Nova Scotia is at
#' \url{http://ec.gc.ca/ouragans-hurricanes/default.asp?lang=En&n=222F51F7-1}.
#' Landfall, very near the site of this sealevel
#' gauge, was between 00:10 and 00:20 Halifax local time on Monday, Sept 29,
#' 2003.
#'
#' @examples
#' library(oce)
#' data(sealevel)
#' ## local Halifax time is UTC + 4h
#' juan <- as.POSIXct("2003-09-29 00:15:00", tz="UTC")+4*3600
#' plot(sealevel, which=1, xlim=juan+86400*c(-7, 7))
#' abline(v=juan, col='red')
#'
#' @family functions that plot oce data
#' @family things related to sealevel data
#'
#' @aliases plot.sealevel
setMethod(f="plot",
signature=signature("sealevel"),
definition=function(x, which=1:3,
drawTimeRange=getOption("oceDrawTimeRange"),
mgp=getOption("oceMgp"),
mar=c(mgp[1]+0.5, mgp[1]+1.5, mgp[2]+1, mgp[2]+3/4),
marginsAsImage=FALSE,
debug=getOption("oceDebug"),
...)
{
## Draw one panel per entry in 'which': 1/"all" = full elevation time
## series, 2/"month" = first 28 days, 3/"spectrum" = power spectrum,
## 4/"cumulativespectrum" = cumulative integral of the spectrum.
oceDebug(debug, "plot.sealevel(..., mar=c(", paste(mar, collapse=", "), "), ...) {\n", sep="", unindent=1)
if ("adorn" %in% names(list(...)))
warning("In plot,adv-method() : the 'adorn' argument was removed in November 2017", call.=FALSE)
dots <- list(...)
## Helper: write station number/name/region and position in the top margin.
titlePlot<-function(x)
{
title <- ""
if (!is.null(x@metadata$stationNumber) || !is.null(x@metadata$stationName) || !is.null(x@metadata$region))
title <- paste(title, gettext("Station ", domain="R-oce"),
if (!is.na(x@metadata$stationNumber)) x@metadata$stationNumber else "",
" ",
if (!is.null(x@metadata$stationName)) x@metadata$stationName else "",
" ",
if (!is.null(x@metadata$region)) x@metadata$region else "",
sep="")
if (!is.na(x@metadata$latitude) && !is.na(x@metadata$longitude))
title <- paste(title, latlonFormat(x@metadata$latitude, x@metadata$longitude), sep="")
if (nchar(title) > 0)
mtext(side=3, title, adj=1, cex=2/3)
}
## Helper: mark one tidal-constituent frequency (cycles per hour) with a
## vertical line and a small margin label.
drawConstituent<-function(frequency=0.0805114007, label="M2", col="darkred", side=1)
{
abline(v=frequency, col=col)
mtext(label, side=side, at=frequency, col=col, cex=3/4*par("cex"))
}
## Helper: mark the principal diurnal and semidiurnal constituents,
## alternating label sides so neighbouring labels do not collide.
drawConstituents<-function()
{
drawConstituent(0.0387306544, "O1", side=1)
##draw.constituent(0.0416666721, "S1", side=3)
drawConstituent(0.0417807462, "K1", side=3)
drawConstituent(0.0789992488, "N2", side=1)
drawConstituent(0.0805114007, "M2", side=3)
drawConstituent(0.0833333333, "S2", side=1)
}
if (!inherits(x, "sealevel"))
stop("method is only for objects of class '", "sealevel", "'")
opar <- par(no.readonly = TRUE)
par(mgp=mgp, mar=mar)
lw <- length(which)
if (marginsAsImage) {
## Reserve a right-hand margin column sized to match the palette area
## of an imagep() plot.
scale <- 0.7
w <- (1.5 + par("mgp")[2]) * par("csi") * scale * 2.54 + 0.5
if (lw > 1)
lay <- layout(matrix(1:(2*lw), nrow=lw, byrow=TRUE), widths=rep(c(1, lcm(w)), lw))
} else {
if (lw > 1)
lay <- layout(cbind(1:lw))
}
## Restore par() on exit only for multi-panel plots; a single-panel plot
## keeps its par settings so the user can add to it (see help text).
if (lw > 1) on.exit(par(opar))
## tidal constituents (in cpd):
## http://www.soest.hawaii.edu/oceanography/dluther/HOME/Tables/Kaw.htm
num.NA <- sum(is.na(x@data$elevation))
par(mgp=mgp)
##par(mar=c(mgp[1],mgp[1]+2.5,mgp[2]+0.5,mgp[2]+1))
par(mar=mar)
##> MSL <- mean(x@data$elevation, na.rm=TRUE)
##> if ("xlim" %in% names(dots)) {
##>     xtmp <- subset(x@data$elevation, dots$xlim[1] <= x@data$time & x@data$time <= dots$xlim[2])
##>     tmp <- max(abs(range(xtmp-MSL, na.rm=TRUE)))
##> } else {
##>     tmp <- max(abs(range(x@data$elevation-MSL, na.rm=TRUE)))
##> }
##> ylim <- c(-tmp, tmp)
##> oceDebug(debug, "ylim=", ylim, "\n")
n <- length(x@data$elevation) # do not trust value in metadata
oceDebug(debug, "which:", which, "\n")
## Map string panel names onto the numeric codes used in the loop below.
which2 <- oce.pmatch(which, list(all=1, month=2, spectrum=3, cumulativespectrum=4))
oceDebug(debug, "which2:", which2, "\n")
for (w in seq_along(which2)) {
oceDebug(debug, "plotting for code which2[", w, "] = ", which2[w], "\n", sep="")
if (which2[w] == 1) {
## Panel 1: the full elevation time series, with dotted grid lines
## at the axis ticks.
plot(x@data$time, x@data$elevation,
xlab="",
ylab=resizableLabel("elevation"),
type='l', xaxs="i",
lwd=0.5, axes=FALSE, ...)
tics <- oce.axis.POSIXct(1, x@data$time, drawTimeRange=drawTimeRange, cex.axis=1, debug=debug-1)
box()
titlePlot(x)
yax <- axis(2)
abline(h=yax, col="darkgray", lty="dotted")
abline(v=tics, col="darkgray", lty="dotted")
##> abline(h=0, col="darkgreen")
##> mtext(side=4, text=sprintf("%.2f m", MSL), col="darkgreen", cex=2/3)
} else if (which2[w] == 2) {
## sample month
## Panel 2: the first 28 days only, with week/day grid lines.
from <- trunc(x@data$time[1], "day")
to <- from + 28 * 86400 # 28 days
look <- from <= x@data$time & x@data$time <= to
xx <- x
for (i in seq_along(x@data)) {
xx@data[[i]] <- x@data[[i]][look]
}
if (any(is.finite(xx@data$elevation))) {
atWeek <- seq(from=from, to=to, by="week")
atDay <- seq(from=from, to=to, by="day")
tmp <- max(abs(range(xx@data$elevation, na.rm=TRUE)))
plot(xx@data$time, xx@data$elevation,
xlab="",
ylab=resizableLabel("elevation"),
type='l', xaxs="i",
axes=FALSE)
oce.axis.POSIXct(1, xx@data$time, drawTimeRange=drawTimeRange, cex.axis=1, debug=debug-1)
yax <- axis(2)
abline(h=yax, col="lightgray", lty="dotted")
box()
abline(v=atWeek, col="darkgray", lty="dotted")
abline(v=atDay, col="lightgray", lty="dotted")
##> abline(h=0, col="darkgreen")
##> mtext(side=4, text=sprintf("%.2f m", MSL), col="darkgreen", cex=2/3)
} else {
## All data in the first month are NA: draw a placeholder panel.
plot(0:1, 0:1, type="n", xlab="", ylab="", axes=FALSE)
box()
text(0.5, 0.5, "Cannot show first month, since all data are NA then")
}
} else if (which2[w] == 3) { # "spectrum"
## Panel 3: power spectrum (requires a gap-free series), truncated
## to frequencies below 0.1 cph, with constituents marked.
if (num.NA == 0) {
Elevation <- ts(x@data$elevation, start=1, deltat=x@metadata$deltat)
##s <- spectrum(Elevation-mean(Elevation),spans=c(5, 3),plot=FALSE,log="y",demean=TRUE,detrend=TRUE)
s <- spectrum(Elevation-mean(Elevation), plot=FALSE, log="y", demean=TRUE, detrend=TRUE)
par(mar=c(mgp[1]+1.25, mgp[1]+1.5, mgp[2]+0.25, mgp[2]+3/4))
xlim <- c(0, 0.1) # FIXME: should be able to set this
ylim <- range(subset(s$spec, xlim[1] <= s$freq & s$freq <= xlim[2]))
plot(s$freq, s$spec, xlim=xlim, ylim=ylim,
xlab=resizableLabel("frequency cph"),
ylab=resizableLabel("spectral density m2/cph"),
#[m^2/cph]",
type='l', log="y")
grid()
drawConstituents()
} else {
plot(0:1, 0:1, type="n", xlab="", ylab="", axes=FALSE)
box()
text(0.5, 0.5, "Some elevations are NA, so cannot calculate the spectrum")
}
} else if (which2[w] == 4) { # "cumulativespectrum"
## Panel 4: square root of the cumulative sum of the spectrum
## (units of metres), again requiring a gap-free series.
if (num.NA == 0) {
n <- length(x@data$elevation)
Elevation <- ts(x@data$elevation, start=1, deltat=x@metadata$deltat)
s <- spectrum(Elevation-mean(Elevation), plot=FALSE, log="y", demean=TRUE, detrend=TRUE)
nCumSpec <- length(s$spec)
cumSpec <- sqrt(cumsum(s$spec) / nCumSpec)
##e <- x@data$elevation - mean(x@data$elevation)
par(mar=c(mgp[1]+1.25, mgp[1]+2.5, mgp[2]+0.25, mgp[2]+0.25))
plot(s$freq, cumSpec,
xlab=resizableLabel("frequency cph"),
ylab=expression(paste(integral(Gamma, 0, f), " df [m]")),
type='l', xlim=c(0, 0.1))
grid()
drawConstituents()
} else {
warning("cannot draw sealevel spectum, because the series contains missing values")
}
} else {
stop("unrecognized value of which: ", which[w])
}
if (marginsAsImage) {
## blank plot, to get axis length same as for images
omar <- par("mar")
par(mar=c(mar[1], 1/4, mgp[2]+1/2, mgp[2]+1))
plot(1:2, 1:2, type='n', axes=FALSE, xlab="", ylab="")
par(mar=omar)
}
}
oceDebug(debug, "} # plot.sealevel()\n", unindent=1)
invisible()
})
#' Read a Sealevel File
#'
#' Read a data file holding sea level data. BUG: the time vector assumes GMT,
#' regardless of the GMT.offset value.
#'
#' This function starts by scanning the first line of the file, from which it
#' determines whether the file is in one of two known formats: type 1, the
#' format used at the Hawaii archive centre, and type 2, the
#' comma-separated-value format used by the Marine Environmental Data Service.
#' The file type is inferred by examination of its first line. If that contains
#' the string `Station_Name` the file is of type 2. If
#' the file is in neither of these formats, the user might wish to scan it
#' directly, and then to use [as.sealevel()] to create a
#' `sealevel` object.
#' The Hawaii archive site at
#' `http://ilikai.soest.hawaii.edu/uhslc/datai.html` at one time provided a graphical
#' interface for downloading sealevel data in Type 1, with format that was once
#' described at `http://ilikai.soest.hawaii.edu/rqds/hourly.fmt` (although that link
#' was observed to no longer work, on December 4, 2016).
#' Examination of data retrieved from what seems to be a replacement Hawaii server
#' (https://uhslc.soest.hawaii.edu/data/?rq) in September 2019 indicated that the
#' format had been changed to what is called Type 3 by `read.sealevel`.
#' Web searches did not uncover documentation on this format, so the
#' decoding scheme was developed solely through examination of
#' data files, which means that it might be not be correct.
#' The MEDS repository (\url{http://www.isdm-gdsi.gc.ca/isdm-gdsi/index-eng.html})
#' provides Type 2 data.
#'
#' @param file a connection or a character string giving the name of the file
#' to load. See Details for the types of files that are recognized.
#'
#' @param tz time zone. The default value, `oceTz`, is set to `UTC`
#' at setup. (If a time zone is present in the file header, this will
#' supercede the value given here.)
#'
#' @param processingLog if provided, the action item to be stored in the log.
#' (Typically only provided for internal calls; the default that it provides is
#' better for normal calls by a user.)
#' @template debugTemplate
#'
#' @return A [sealevel-class] object.
#'
#' @author Dan Kelley
#'
#' @family things related to sealevel data
## Read a sea-level file in one of three recognized layouts, returning a
## sealevel-class object. Format is sniffed from the first line:
##   - "Station_Name"-prefixed header  -> MEDS CSV (called format 1 below)
##   - header containing LAT=/LONG=/TIMEZONE -> newer Hawaii layout (type 3)
##   - otherwise -> older fixed-column Hawaii layout (type 2)
read.sealevel <- function(file, tz=getOption("oceTz"), processingLog, debug=getOption("oceDebug"))
{
oceDebug(debug, "read.sealevel(file=\"", file, "\", ...) {\n", sep="", unindent=1)
if (!missing(file) && is.character(file) && 0 == file.info(file)$size)
stop("empty file")
filename <- "?"
## Accept either a file name or an already-open connection; in both cases
## arrange for the connection to be closed on exit.
if (is.character(file)) {
filename <- fullFilename(file)
file <- file(file, "r")
on.exit(close(file))
}
if (!inherits(file, "connection"))
stop("argument `file' must be a character string or connection")
if (!isOpen(file)) {
filename <- "(connection)"
open(file, "r")
on.exit(close(file))
}
fileOrig <- file
## Peek at the first line for format detection, then push it back so the
## per-format readers below see the whole file.
firstLine <- readLines(file, n=1, encoding="UTF-8")
header <- firstLine
oceDebug(debug, "header (first line in file): '", header, "'\n", sep="")
pushBack(firstLine, file)
## Metadata defaults; each format branch fills in what it can.
stationNumber <- NA
stationVersion <- NA
stationName <- NULL
region <- NULL
year <- NA
latitude <- NA
longitude <- NA
GMTOffset <- NA
decimationMethod <- NA
referenceOffset <- NA
referenceCode <- NA
res <- new('sealevel')
if (substr(firstLine, 1, 12) == "Station_Name") {
oceDebug(debug, "File is of format 1 (e.g. as in MEDS archives)\n")
## Station_Name,HALIFAX
## Station_Number,490
## Latitude_Decimal_Degrees,44.666667
## Longitude_Decimal_Degrees,63.583333
## Datum,CD
## Time_Zone,AST
## SLEV=Observed Water Level
## Obs_date,SLEV
## 01/01/2001 12:00 AM,1.82,
headerLength <- 8
header <- readLines(file, n = headerLength)
if (debug > 0) {
print(header)
}
## Each header line is "key,value"; take the value field.
stationName <- strsplit(header[1], ",")[[1]][2]
stationNumber <- as.numeric(strsplit(header[2], ",")[[1]][2])
latitude <- as.numeric(strsplit(header[3], ",")[[1]][2])
longitude <- as.numeric(strsplit(header[4], ",")[[1]][2])
tz <- strsplit(header[6], ",")[[1]][2] # needed for get GMT offset
GMTOffset <- GMTOffsetFromTz(tz)
oceDebug(debug, "about to read data\n")
x <- read.csv(file, header=FALSE, stringsAsFactors=FALSE)#, skip=headerLength)
oceDebug(debug, "... finished reading data\n")
## Two observed date conventions: ISO-like "YYYY/MM/DD HH:MM" or
## "DD/MM/YYYY HH:MM AM/PM"; convert to UTC using the header offset.
if (length(grep("[0-9]{4}/", x$V1[1])) > 0) {
oceDebug(debug, "Date format is year/month/day hour:min with hour in range 1:24\n")
time <- strptime(as.character(x$V1), "%Y/%m/%d %H:%M", "UTC") + 3600 * GMTOffset
} else {
oceDebug(debug, "Date format is day/month/year hour:min AMPM with hour in range 1:12 and AMPM indicating whether day or night\n")
time <- strptime(as.character(x$V1), "%d/%m/%Y %I:%M %p", "UTC") + 3600 * GMTOffset
}
elevation <- as.numeric(x$V2)
oceDebug(debug, "tz=", tz, "so GMTOffset=", GMTOffset, "\n",
"first pass has time string:", as.character(x$V1)[1], "\n",
"first pass has time start:", format(time[1]), " ", attr(time[1], "tzone"), "\n")
year <- as.POSIXlt(time[1])$year + 1900
} else {
oceDebug(debug, "File is of type 2 or 3\n")
d <- readLines(file)
n <- length(d)
header <- d[1]
if (grepl("LAT=", header) && grepl("LONG=", header) && grepl("TIMEZONE",header)) {
## URL
## http://uhslc.soest.hawaii.edu/woce/h275.dat
## is a sample file, which starts as below (with quote marks added):
## '275HALIFAX      1895 LAT=44 40.0N LONG=063 35.0W TIMEZONE=GMT'
## '275HALIFAX      189501011 9999 9999 ...'
oceDebug(debug, "type 3 (format inferred/guessed from e.g. http://uhslc.soest.hawaii.edu/woce/h275.dat)\n")
stationNumber <- strtrim(header, 3)
oceDebug(debug, " stationNumber='", stationNumber, "'\n", sep="")
## Extract "DDD MM.M[EW]" / "DD MM.M[NS]" substrings from the header,
## then convert degrees+decimal-minutes to signed decimal degrees.
longitudeString <- gsub("^.*LONG=([ 0-9.]*[EWew]).*$", "\\1", header)
latitudeString <- gsub("^.*LAT=([ 0-9.]*[NSns]).*$", "\\1", header)
oceDebug(debug, " longitudeString='", longitudeString, "'\n", sep="")
oceDebug(debug, " latitudeString='", latitudeString, "'\n", sep="")
longitudeSplit <- strsplit(longitudeString, split="[ \t]+")[[1]]
longitudeDegree <- longitudeSplit[1]
longitudeMinute <- longitudeSplit[2]
oceDebug(debug, " longitudeDegree='", longitudeDegree, "'\n", sep="")
oceDebug(debug, " longitudeMinute='", longitudeMinute, "'\n", sep="")
longitudeSign <- if (grepl("[wW]", longitudeMinute)) -1 else 1
oceDebug(debug, " longitudeSign=", longitudeSign, "\n")
longitudeMinute <- gsub("[EWew]", "", longitudeMinute)
oceDebug(debug, " longitudeMinute='", longitudeMinute, "' after removing EW suffix\n", sep="")
longitude <- longitudeSign * (as.numeric(longitudeDegree) + as.numeric(longitudeMinute)/60)
oceDebug(debug, " longitude=", longitude, "\n")
latitudeSplit <- strsplit(latitudeString, split="[ \t]+")[[1]]
latitudeDegree <- latitudeSplit[1]
latitudeMinute <- latitudeSplit[2]
latitudeSign <- if (grepl("[sS]", latitudeMinute)) -1 else 1
oceDebug(debug, " latitudeSign=", latitudeSign, "\n")
latitudeMinute <- gsub("[SNsn]", "", latitudeMinute)
oceDebug(debug, " latitudeMinute='", latitudeMinute, "' after removing NS suffix\n", sep="")
latitude <- latitudeSign * (as.numeric(latitudeDegree) + as.numeric(latitudeMinute)/60)
oceDebug(debug, " latitude=", latitude, "\n")
## Remove interspersed year boundaries (which look like the first line).
d2 <- d[!grepl("LAT=.*LONG=", d)]
## Drop the station-name prefix: data start at the first blank column
## of the header line.
start <- 1 + which(strsplit(header,"")[[1]]==" ")[1]
d3 <- substr(d2, start, 1000L)
## Fix problem where the month is sometimes e.g. ' 1' instead of '01'
d4 <- gsub("^([1-9][0-9]{3}) ", "\\10", d3)
## Fix problem where the day is sometimes e.g. ' 1' instead of '01'
d5 <- gsub("^([1-9][0-9]{3}[0-9]{2}) ", "\\10", d4)
n <- length(d5)
## Now we have as below. But the second block sometimes has ' ' for '0', so we
## need to fix that.
## '189501011 9999 9999 9999 9999 9999 9999 9999 9999 9999 9999 9999 9999'
## Each data line holds 12 hourly values for half a day; the 9th digit
## of the date token ("dayPortion") is 1 for the first half, 2 for the
## second, and must alternate between consecutive lines.
twelve <- seq(1, 12, 1)
elevation <- rep(NA, 12 * n)
time <- rep(NA, 12 * n)
lastDayPortion <- NULL # value defined at i=1, checked at i>2, so the initial value is immaterial
for (i in 1:n) {
sp <- strsplit(d5[i], "[ ]+")[[1]]
target.index <- 12 * (i-1) + twelve
## oceDebug(debug, " i=", i, ", length(sp)=", length(sp), "\n")
if (length(sp) != 13) {
stop("cannot parse tokens on line '", d2[i], "'\n", sep="")
}
elevation[target.index] <- as.numeric(sp[2:13])
dayPortion <- as.numeric(substr(sp[1], 9, 9))
if (i == 1) {
startDay <- as.POSIXct(strptime(paste(substr(sp[1], 1, 8), "00:00:00"), "%Y%m%d"), tz=tz)
oceDebug(debug, " startDay=", startDay, "\n")
} else {
if (dayPortion == 1) {
if (i > 2 && lastDayPortion != 2)
stop("non-alternating day portions on data line ", i)
} else if (dayPortion == 2) {
if (i > 2 && lastDayPortion != 1)
stop("non-alternating day portions on data line ", i)
} else {
stop("day portion is ", dayPortion, " but must be 1 or 2, on data line", i)
}
}
lastDayPortion <- dayPortion
time[target.index] <- as.POSIXct(sp[1], format="%Y%m%d",tz="UTC")+3600*(seq(0,11) + 12 * (dayPortion-1))
}
## 9999 is the missing-value sentinel; heights are in millimetres.
elevation[elevation==9999] <- NA
elevation <- elevation / 1000 # convert mm to m
time <- numberAsPOSIXct(time, tz="UTC") # guess on timezone
} else {
oceDebug(debug, "type 2 (an old Hawaii format, inferred from documentation)\n")
## Fixed-column header: field positions follow the (formerly
## published) hourly.fmt documentation.
stationNumber <- substr(header, 1, 3)
stationVersion <- substr(header, 4, 4)
stationName <- substr(header, 6, 23)
stationName <- sub("[ ]*$", "", stationName)
region <- substr(header, 25, 43)
region <- sub("[ ]*$", "", region)
year <- substr(header, 45, 48)
latitudeStr <- substr(header, 50, 55) #degrees,minutes,tenths,hemisphere
## minutes-and-tenths field / 600 converts to decimal degrees.
latitude <- as.numeric(substr(latitudeStr, 1, 2)) + (as.numeric(substr(latitudeStr, 3, 5)))/600
if (tolower(substr(latitudeStr, 6, 6)) == "s") latitude <- -latitude
longitudeStr <- substr(header, 57, 63) #degrees,minutes,tenths,hemisphere
longitude <- as.numeric(substr(longitudeStr, 1, 3)) + (as.numeric(substr(longitudeStr, 4, 6)))/600
if (tolower(substr(longitudeStr, 7, 7)) == "w") longitude <- -longitude
GMTOffset <- substr(header, 65, 68) #hours,tenths (East is +ve)
oceDebug(debug, "GMTOffset=", GMTOffset, "\n")
decimationMethod <- substr(header, 70, 70) #1=filtered 2=average 3=spot readings 4=other
referenceOffset <- substr(header, 72, 76) # add to values
referenceCode <- substr(header, 77, 77) # add to values
units <- substr(header, 79, 80)
oceDebug(debug, "units=", units, "\n")
if (nchar(units) == 0) {
warning("no units can be inferred from the file, so assuming 'mm'")
} else {
if (units != "mm" && units != "MM")
stop("require units to be 'mm' or 'MM', not '", units, "'")
}
## Data lines start at d[2]; each carries 12 hourly values for half a
## day, with the same alternating dayPortion convention as type 3.
elevation <- array(NA_real_, 12 * (n-1))
## first.twelve.hours <- 3600 * (0:11)
## second.twelve.hours <- 3600 * (12:23)
twelve <- seq(1, 12, 1)
lastDayPortion <- -1 # ignored; prevents undefined warning in code analysis
for (i in 2:n) {
sp <- strsplit(d[i], "[ ]+")[[1]]
target.index <- 12 * (i-2) + twelve
elevation[target.index] <- as.numeric(sp[4:15])
dayPortion <- as.numeric(substr(sp[3], 9, 9))
if (i == 2) {
startDay <- as.POSIXct(strptime(paste(substr(sp[3], 1, 8), "00:00:00"), "%Y%m%d"), tz=tz)
} else {
if (dayPortion == 1) {
if (i > 2 && lastDayPortion != 2)
stop("non-alternating day portions on data line ", i)
} else if (dayPortion == 2) {
if (i > 2 && lastDayPortion != 1)
stop("non-alternating day portions on data line ", i)
} else {
stop("day portion is ", dayPortion, " but must be 1 or 2, on data line", i)
}
}
lastDayPortion <- dayPortion
}
## Times are an hourly sequence from the first day; 9999 marks missing.
time <- as.POSIXct(startDay + 3600 * (seq(0, 12 * (n-1)-1)), tz=tz)
elevation[elevation==9999] <- NA
if (tolower(units) == "mm") {
elevation <- elevation / 1000
} else {
stop("require units to be MM")
}
}
}
##num.missing <- sum(is.na(elevation))
##if (num.missing > 0) warning("there are ", num.missing, " missing points in this timeseries, at indices ", paste(which(is.na(elevation)), ""))
## Assemble the object from whichever branch populated the fields above.
res@metadata$filename <- filename
res@metadata$header <- header
res@metadata$year <- year
res@metadata$stationNumber <- stationNumber
res@metadata$stationVersion <- stationVersion
res@metadata$stationName <- stationName
res@metadata$region <- region
res@metadata$latitude <- latitude
res@metadata$longitude <- longitude
res@metadata$GMTOffset <- GMTOffset
res@metadata$decimationMethod <- decimationMethod
res@metadata$referenceOffset <- referenceOffset
res@metadata$referenceCode <- referenceCode
res@metadata$units <- list(elevation=list(unit=expression(m), scale=""))
res@metadata$n <- length(time)
## deltat is in hours
res@metadata$deltat <- if (res@metadata$n > 1) (as.numeric(time[2]) - as.numeric(time[1])) / 3600 else 0
if (missing(processingLog))
processingLog <- paste('read.sealevel(file="', file, '", tz="', tz, sep="", collapse="")
res@data$elevation <- elevation
res@data$time <- time
res@processingLog <- processingLogAppend(res@processingLog,
paste('read.sealevel(file="', fileOrig, '", tz="', tz, '")', sep="", collapse=""))
res
}
|
#' kpPlotNames
#'
#' @description
#'
#' Plots text labels with positioning relative to rectangles along the genome.
#'
#' @details
#'
#' This is a simple wrapper around \code{\link{kpText}} that positions the
#' text relative to the rectangles defined by its arguments. They may be
#' used to name or label different graphical elements in the plot.
#' The rectangles may be specified as in \code{\link{kpRect}}; the relative
#' positions accepted are: "left", "right", "top", "bottom", "center".
#' It is possible to specify an empty label (\code{labels=""}) to leave an
#' element without a name.
#'
#' @usage kpPlotNames(karyoplot, data=NULL, chr=NULL, x0=NULL, x1=x0, y0=NULL, y1=NULL, labels=NULL, position="left", ymax=NULL, ymin=NULL, r0=NULL, r1=NULL, data.panel=1, clipping=TRUE, ...)
#'
#' @inheritParams kpRect
#' @param position (character) The position of the text relative to the rectangle. Can be "left", "right", "top", "bottom" or "center". Defaults to "left".
#' @param labels (character) The labels to use in the plot. They will be associated to the rectangles by its order and recycled as needed.
#'
#' @return
#'
#' Returns the original karyoplot object, unchanged.
#'
#' @seealso \code{\link{kpText}}, \code{\link{kpRect}}
#'
#' @examples
#'
#'
#' regs <- toGRanges(data.frame(chr=c("chr1", "chr1", "chr1"),
#' start=c(20e6, 100e6, 200e6),
#' end=c(40e6, 170e6, 210e6),
#' y0=c(0.1, 0.5, 0.7),
#' y1=c(0.5, 0.6, 0.95)))
#'
#' kp <- plotKaryotype(genome="hg19", chromosomes="chr1")
#' kpRect(kp, data=regs)
#'
#' kpPlotNames(kp, data=regs, labels=c("R1", "R2", "R3"))
#' kpPlotNames(kp, data=regs, labels=c("R1", "R2", "R3"), position="top", cex=2)
#' kpPlotNames(kp, data=regs, labels=c("R1", "", "R3"), position="right", col="red")
#' kpPlotNames(kp, data=regs, labels="bottom", position="bottom", col=rainbow(3))
#' kpPlotNames(kp, data=regs, labels="o", position="center", col=rainbow(3), cex=1)
#'
#'@export kpPlotNames
kpPlotNames <- function(karyoplot, data=NULL, chr=NULL, x0=NULL, x1=x0, y0=NULL, y1=NULL,
                        labels=NULL, position="left",
                        ymax=NULL, ymin=NULL, r0=NULL, r1=NULL, data.panel=1, clipping=TRUE, ...) {
    ## Plot text labels positioned relative to rectangles along the genome
    ## (a thin wrapper around kpText). Returns the karyoplot invisibly.
    #karyoplot
    if(missing(karyoplot)) stop("The parameter 'karyoplot' is required")
    if(!methods::is(karyoplot, "KaryoPlot")) stop("'karyoplot' must be a valid 'KaryoPlot' object")
    #position
    position <- match.arg(position, c("left", "right", "top", "bottom", "center"))
    #Note: we use r0=0 and r1=1 (and ymin=0 and ymax=1) so we only use data input normalization (data, chr, etc...) but not normalization of r0, ymins, etc...
    pp <- prepareParameters4("kpPlotNames", karyoplot=karyoplot, data=data, chr=chr, x0=x0, x1=x1,
                             y0=y0, y1=y1, ymin=0, ymax=1, r0=0, r1=1,
                             data.panel=data.panel, ...)
    #If there's nothing to plot, return early.
    #BUG FIX: the original evaluated invisible(karyoplot) without return(),
    #so execution fell through and kpText was still called with empty data.
    if(length(pp$chr)==0) {
        return(invisible(karyoplot))
    }
    #Now decide how to plot (with respect to the rectangles), and call kpText
    #with the appropriate parameters ('pos' codes: 1=below, 2=left, 3=above, 4=right).
    switch(position,
        left=kpText(karyoplot, chr=pp$chr, x=pp$x0, y=pp$y0+(pp$y1-pp$y0)/2, labels=labels, pos=2, ymin=ymin, ymax=ymax, r0=r0, r1=r1, clipping=clipping, data.panel=data.panel, ...),
        right=kpText(karyoplot, chr=pp$chr, x=pp$x1, y=pp$y0+(pp$y1-pp$y0)/2, labels=labels, pos=4, ymin=ymin, ymax=ymax, r0=r0, r1=r1, clipping=clipping, data.panel=data.panel, ...),
        top=kpText(karyoplot, chr=pp$chr, x=pp$x0+(pp$x1-pp$x0)/2, y=pp$y1, labels=labels, pos=3, ymin=ymin, ymax=ymax, r0=r0, r1=r1, clipping=clipping, data.panel=data.panel, ...),
        bottom=kpText(karyoplot, chr=pp$chr, x=pp$x0+(pp$x1-pp$x0)/2, y=pp$y0, labels=labels, pos=1, ymin=ymin, ymax=ymax, r0=r0, r1=r1, clipping=clipping, data.panel=data.panel, ...),
        center=kpText(karyoplot, chr=pp$chr, x=pp$x0+(pp$x1-pp$x0)/2, y=pp$y0+(pp$y1-pp$y0)/2, labels=labels, ymin=ymin, ymax=ymax, r0=r0, r1=r1, clipping=clipping, data.panel=data.panel, ...)
    )
    invisible(karyoplot)
}
| /R/kpPlotNames.R | no_license | Yue-Jiang/karyoploteR | R | false | false | 4,165 | r | #' kpPlotNames
#'
#' @description
#'
#' Plots text labels with positioning relative to rectangles along the genome.
#'
#' @details
#'
#' This is a simple wrapper around \code{\link{kpText}} that positions the
#' text relative to the rectangles defined by its arguments. They may be
#' used to name or label different graphical elements in the plot.
#' The rectangles may be specified as in \code{\link{kpRect}}; the relative
#' positions accepted are: "left", "right", "top", "bottom", "center".
#' It is possible to specify an empty label (\code{labels=""}) to leave an
#' element without name.
#'
#' @usage kpPlotNames(karyoplot, data=NULL, chr=NULL, x0=NULL, x1=x0, y0=NULL, y1=NULL, labels=NULL, position="left", ymax=NULL, ymin=NULL, r0=NULL, r1=NULL, data.panel=1, clipping=TRUE, ...)
#'
#' @inheritParams kpRect
#' @param position (character) The position of the text relative to the rectangle. Can be "left", "right", "top", "bottom" or "center". Defaults to "left".
#' @param labels (character) The labels to use in the plot. They will be associated to the rectangles by its order and recycled as needed.
#'
#' @return
#'
#' Returns the original karyoplot object, unchanged.
#'
#' @seealso \code{\link{kpText}}, \code{\link{kpRect}}
#'
#' @examples
#'
#'
#' regs <- toGRanges(data.frame(chr=c("chr1", "chr1", "chr1"),
#' start=c(20e6, 100e6, 200e6),
#' end=c(40e6, 170e6, 210e6),
#' y0=c(0.1, 0.5, 0.7),
#' y1=c(0.5, 0.6, 0.95)))
#'
#' kp <- plotKaryotype(genome="hg19", chromosomes="chr1")
#' kpRect(kp, data=regs)
#'
#' kpPlotNames(kp, data=regs, labels=c("R1", "R2", "R3"))
#' kpPlotNames(kp, data=regs, labels=c("R1", "R2", "R3"), position="top", cex=2)
#' kpPlotNames(kp, data=regs, labels=c("R1", "", "R3"), position="right", col="red")
#' kpPlotNames(kp, data=regs, labels="bottom", position="bottom", col=rainbow(3))
#' kpPlotNames(kp, data=regs, labels="o", position="center", col=rainbow(3), cex=1)
#'
#'@export kpPlotNames
kpPlotNames <- function(karyoplot, data=NULL, chr=NULL, x0=NULL, x1=x0, y0=NULL, y1=NULL,
                        labels=NULL, position="left",
                        ymax=NULL, ymin=NULL, r0=NULL, r1=NULL, data.panel=1, clipping=TRUE, ...) {
  # Validate 'karyoplot'.
  if(missing(karyoplot)) stop("The parameter 'karyoplot' is required")
  if(!methods::is(karyoplot, "KaryoPlot")) stop("'karyoplot' must be a valid 'KaryoPlot' object")
  # Validate 'position' against the accepted values.
  position <- match.arg(position, c("left", "right", "top", "bottom", "center"))
  # Note: we use r0=0 and r1=1 (and ymin=0 and ymax=1) so we only use data input
  # normalization (data, chr, etc...) but not normalization of r0, ymins, etc...
  pp <- prepareParameters4("kpPlotNames", karyoplot=karyoplot, data=data, chr=chr, x0=x0, x1=x1,
                           y0=y0, y1=y1, ymin=0, ymax=1, r0=0, r1=1,
                           data.panel=data.panel, ...)
  # If there's nothing to plot, return early.
  # BUG FIX: the original evaluated invisible(karyoplot) here without return(),
  # so the empty case fell through and kpText was still called below.
  if(length(pp$chr)==0) {
    return(invisible(karyoplot))
  }
  # Decide how to plot (with respect to the rectangles), and call kpText with
  # the appropriate parameters. 'pos' follows graphics::text: 1=below, 2=left,
  # 3=above, 4=right of the anchor; "center" uses the default centering.
  switch(position,
         left=kpText(karyoplot, chr=pp$chr, x=pp$x0, y=pp$y0+(pp$y1-pp$y0)/2, labels=labels, pos=2, ymin=ymin, ymax=ymax, r0=r0, r1=r1, clipping=clipping, data.panel=data.panel, ...),
         right=kpText(karyoplot, chr=pp$chr, x=pp$x1, y=pp$y0+(pp$y1-pp$y0)/2, labels=labels, pos=4, ymin=ymin, ymax=ymax, r0=r0, r1=r1, clipping=clipping, data.panel=data.panel, ...),
         top=kpText(karyoplot, chr=pp$chr, x=pp$x0+(pp$x1-pp$x0)/2, y=pp$y1, labels=labels, pos=3, ymin=ymin, ymax=ymax, r0=r0, r1=r1, clipping=clipping, data.panel=data.panel, ...),
         bottom=kpText(karyoplot, chr=pp$chr, x=pp$x0+(pp$x1-pp$x0)/2, y=pp$y0, labels=labels, pos=1, ymin=ymin, ymax=ymax, r0=r0, r1=r1, clipping=clipping, data.panel=data.panel, ...),
         center=kpText(karyoplot, chr=pp$chr, x=pp$x0+(pp$x1-pp$x0)/2, y=pp$y0+(pp$y1-pp$y0)/2, labels=labels, ymin=ymin, ymax=ymax, r0=r0, r1=r1, clipping=clipping, data.panel=data.panel, ...)
  )
  # Always return the karyoplot invisibly so calls can be chained.
  invisible(karyoplot)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add.R, R/add_.R
\name{e_area}
\alias{e_area}
\alias{e_area_}
\title{Area}
\usage{
e_area(
e,
serie,
bind,
name = NULL,
legend = TRUE,
y_index = 0,
x_index = 0,
coord_system = "cartesian2d",
...
)
e_area_(
e,
serie,
bind = NULL,
name = NULL,
legend = TRUE,
y_index = 0,
x_index = 0,
coord_system = "cartesian2d",
...
)
}
\arguments{
\item{e}{An \code{echarts4r} object as returned by \code{\link{e_charts}} or
a proxy as returned by \code{\link{echarts4rProxy}}.}
\item{serie}{Column name of serie to plot.}
\item{bind}{Binding between datasets, namely for use of \code{\link{e_brush}}.}
\item{name}{name of the serie.}
\item{legend}{Whether to add serie to legend.}
\item{y_index}{Index of the y axis to plot the serie against.}
\item{x_index}{Index of the x axis to plot the serie against.}
\item{coord_system}{Coordinate system to plot against.}
\item{...}{Any other option to pass, check See Also section.}
}
\description{
Add area serie.
}
\examples{
CO2 |>
group_by(Plant) |>
e_charts(conc) |>
e_area(uptake) |>
e_tooltip(trigger = "axis")
# timeline
iris |>
group_by(Species) |>
e_charts(Sepal.Length, timeline = TRUE) |>
e_area(Sepal.Width) |>
e_tooltip(trigger = "axis")
}
\seealso{
\href{https://echarts.apache.org/en/option.html#series-line}{Additional arguments}
}
| /man/e_area.Rd | permissive | phgis/echarts4r | R | false | true | 1,372 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add.R, R/add_.R
\name{e_area}
\alias{e_area}
\alias{e_area_}
\title{Area}
\usage{
e_area(
e,
serie,
bind,
name = NULL,
legend = TRUE,
y_index = 0,
x_index = 0,
coord_system = "cartesian2d",
...
)
e_area_(
e,
serie,
bind = NULL,
name = NULL,
legend = TRUE,
y_index = 0,
x_index = 0,
coord_system = "cartesian2d",
...
)
}
\arguments{
\item{e}{An \code{echarts4r} object as returned by \code{\link{e_charts}} or
a proxy as returned by \code{\link{echarts4rProxy}}.}
\item{serie}{Column name of serie to plot.}
\item{bind}{Binding between datasets, namely for use of \code{\link{e_brush}}.}
\item{name}{name of the serie.}
\item{legend}{Whether to add serie to legend.}
\item{y_index}{Index of the y axis to plot the serie against.}
\item{x_index}{Index of the x axis to plot the serie against.}
\item{coord_system}{Coordinate system to plot against.}
\item{...}{Any other option to pass, check See Also section.}
}
\description{
Add area serie.
}
\examples{
CO2 |>
group_by(Plant) |>
e_charts(conc) |>
e_area(uptake) |>
e_tooltip(trigger = "axis")
# timeline
iris |>
group_by(Species) |>
e_charts(Sepal.Length, timeline = TRUE) |>
e_area(Sepal.Width) |>
e_tooltip(trigger = "axis")
}
\seealso{
\href{https://echarts.apache.org/en/option.html#series-line}{Additional arguments}
}
|
# Extract the dataset.
fileName <- "exdata_data_household_power_consumption.zip"
unzip(fileName)
# Read the data ("?" marks missing values in this dataset).
data <- read.table("household_power_consumption.txt", header=TRUE,
                   sep=";", stringsAsFactors=FALSE, na.strings="?")
# Subset the data from the dates 2007-02-01 and 2007-02-02.
subsetData <- subset(data, data$Date=="2/2/2007"|data$Date=="1/2/2007")
# Create "plot4.png"
# BUG FIX: the original swapped the two columns, assigning
# Global_reactive_power to globalActivePower and Global_active_power to
# globalReactivePower, so both corresponding panels plotted the wrong data.
globalActivePower <- as.numeric(subsetData$Global_active_power)
DateTime <- strptime(paste(subsetData$Date, subsetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
voltage <- as.numeric(subsetData$Voltage)
subMetering1 <- as.numeric(subsetData$Sub_metering_1)
subMetering2 <- as.numeric(subsetData$Sub_metering_2)
subMetering3 <- as.numeric(subsetData$Sub_metering_3)
globalReactivePower <- as.numeric(subsetData$Global_reactive_power)
png("plot4.png", width=480, height=480)
# Place the graphs into a 2 rows by 2 columns layout.
par(mfrow=c(2,2))
# Upper left: global active power over time.
plot(DateTime, globalActivePower, type="l", xlab="", ylab="Global Active Power")
# Upper right: voltage over time.
plot(DateTime, voltage, type="l", xlab="datetime", ylab = "Voltage")
# Lower left: the three sub-metering series overlaid.
plot(DateTime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(DateTime, subMetering2, type="l", col="red")
lines(DateTime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=1, col=c("black", "red", "blue"))
# Lower right: global reactive power over time.
plot(DateTime, globalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
| /plot4.R | no_license | PreciousEunice/ExData_Plotting1 | R | false | false | 1,628 | r | #Extract the dataset.
# Extract the dataset.
fileName <- "exdata_data_household_power_consumption.zip"
unzip(fileName)
# Read the data ("?" marks missing values in this dataset).
data <- read.table("household_power_consumption.txt", header=TRUE,
                   sep=";", stringsAsFactors=FALSE, na.strings="?")
# Subset the data from the dates 2007-02-01 and 2007-02-02.
subsetData <- subset(data, data$Date=="2/2/2007"|data$Date=="1/2/2007")
# Create "plot4.png"
# BUG FIX: the original swapped the two columns, assigning
# Global_reactive_power to globalActivePower and Global_active_power to
# globalReactivePower, so both corresponding panels plotted the wrong data.
globalActivePower <- as.numeric(subsetData$Global_active_power)
DateTime <- strptime(paste(subsetData$Date, subsetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
voltage <- as.numeric(subsetData$Voltage)
subMetering1 <- as.numeric(subsetData$Sub_metering_1)
subMetering2 <- as.numeric(subsetData$Sub_metering_2)
subMetering3 <- as.numeric(subsetData$Sub_metering_3)
globalReactivePower <- as.numeric(subsetData$Global_reactive_power)
png("plot4.png", width=480, height=480)
# Place the graphs into a 2 rows by 2 columns layout.
par(mfrow=c(2,2))
# Upper left: global active power over time.
plot(DateTime, globalActivePower, type="l", xlab="", ylab="Global Active Power")
# Upper right: voltage over time.
plot(DateTime, voltage, type="l", xlab="datetime", ylab = "Voltage")
# Lower left: the three sub-metering series overlaid.
plot(DateTime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(DateTime, subMetering2, type="l", col="red")
lines(DateTime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=1, col=c("black", "red", "blue"))
# Lower right: global reactive power over time.
plot(DateTime, globalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
### =========================================================================
### Low-level manipulation of HDF5 Dimension Scale datasets
### -------------------------------------------------------------------------
###
### Nothing in this file is exported.
###
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### h5isdimscale()
###
h5isdimscale <- function(filepath, name)
{
    ## Ask the C layer whether HDF5 dataset 'name' in file 'filepath' is
    ## itself a Dimension Scale dataset.
    .Call2("C_h5isdimscale", filepath, name, PACKAGE="HDF5Array")
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### h5getdimscales() / h5setdimscales()
###
### Retrieve the names of the existing HDF5 datasets (1 per dimension in
### dataset 'name') currently attached along the dimensions of dataset 'name'
### for Dimension Scale 'scalename'.
h5getdimscales <- function(filepath, name, scalename=NA)
{
    ## For each dimension of dataset 'name', return the name of the HDF5
    ## dataset currently attached for Dimension Scale 'scalename'.
    stopifnot(isSingleStringOrNA(scalename))
    ## Coerce to character for the .Call interface (NA -> NA_character_).
    scalename <- as.character(scalename)
    ans <- .Call2("C_h5getdimscales", filepath, name, scalename,
                  PACKAGE="HDF5Array")
    ans
}
### name: The name of the dataset on which to set Dimension Scales.
### dimscales: A character vector containing the names of the existing HDF5
### datasets (1 per dimension in dataset 'name') to attach along
### the dimensions of dataset 'name'. NAs are allowed and, if
### present, nothing gets attached along the corresponding
### dimensions.
### scalename: The name of the Dimension Scale (analog to the name of an
### attribute in R).
h5setdimscales <- function(filepath, name, dimscales, scalename=NA,
                           dry.run=FALSE)
{
    ## Attach existing HDF5 datasets (one per dimension of 'name'; NA
    ## entries attach nothing on that dimension) along the dimensions of
    ## dataset 'name' for Dimension Scale 'scalename'.
    stopifnot(isSingleStringOrNA(scalename),
              is.character(dimscales),
              isTRUEorFALSE(dry.run))
    ## as.character() turns the NA sentinel into NA_character_ for .Call.
    .Call2("C_h5setdimscales", filepath, name, dimscales,
           as.character(scalename), dry.run,
           PACKAGE="HDF5Array")
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Get/set the "dimension labels" of an HDF5 dataset
###
### The "dimension labels" the HDF5 equivalent of the names on 'dimnames(a)'
### in R.
###
h5getdimlabels <- function(filepath, name)
{
    ## Retrieve the "dimension labels" of HDF5 dataset 'name' (the HDF5
    ## equivalent of the names on 'dimnames(a)' in R).
    .Call2("C_h5getdimlabels", filepath, name, PACKAGE="HDF5Array")
}
h5setdimlabels <- function(filepath, name, dimlabels)
{
    ## Set the "dimension labels" of HDF5 dataset 'name'; 'dimlabels'
    ## must be a character vector. The result is returned invisibly.
    stopifnot(is.character(dimlabels))
    ans <- .Call2("C_h5setdimlabels", filepath, name, dimlabels,
                  PACKAGE="HDF5Array")
    invisible(ans)
}
| /R/h5dimscales.R | no_license | Bioconductor/HDF5Array | R | false | false | 2,530 | r | ### =========================================================================
### Low-level manipulation of HDF5 Dimension Scale datasets
### -------------------------------------------------------------------------
###
### Nothing in this file is exported.
###
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### h5isdimscale()
###
h5isdimscale <- function(filepath, name)
{
    ## Ask the C layer whether HDF5 dataset 'name' in file 'filepath' is
    ## itself a Dimension Scale dataset.
    .Call2("C_h5isdimscale", filepath, name, PACKAGE="HDF5Array")
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### h5getdimscales() / h5setdimscales()
###
### Retrieve the names of the existing HDF5 datasets (1 per dimension in
### dataset 'name') currently attached along the dimensions of dataset 'name'
### for Dimension Scale 'scalename'.
h5getdimscales <- function(filepath, name, scalename=NA)
{
    ## 'scalename' must be a single string or NA.
    stopifnot(isSingleStringOrNA(scalename))
    ## Coerce to character for the .Call interface (NA -> NA_character_).
    scalename <- as.character(scalename)
    .Call2("C_h5getdimscales", filepath, name, scalename,
           PACKAGE="HDF5Array")
}
### name: The name of the dataset on which to set Dimension Scales.
### dimscales: A character vector containing the names of the existing HDF5
### datasets (1 per dimension in dataset 'name') to attach along
### the dimensions of dataset 'name'. NAs are allowed and, if
### present, nothing gets attached along the corresponding
### dimensions.
### scalename: The name of the Dimension Scale (analog to the name of an
### attribute in R).
h5setdimscales <- function(filepath, name, dimscales, scalename=NA,
                           dry.run=FALSE)
{
    ## Validate arguments: a single string or NA scale name, a character
    ## vector of dataset names (one per dimension), and a scalar flag.
    stopifnot(isSingleStringOrNA(scalename),
              is.character(dimscales),
              isTRUEorFALSE(dry.run))
    ## Coerce to character for the .Call interface (NA -> NA_character_).
    scalename <- as.character(scalename)
    .Call2("C_h5setdimscales", filepath, name, dimscales, scalename, dry.run,
           PACKAGE="HDF5Array")
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Get/set the "dimension labels" of an HDF5 dataset
###
### The "dimension labels" the HDF5 equivalent of the names on 'dimnames(a)'
### in R.
###
h5getdimlabels <- function(filepath, name)
{
    ## Retrieve the "dimension labels" of HDF5 dataset 'name' (the HDF5
    ## equivalent of the names on 'dimnames(a)' in R).
    .Call2("C_h5getdimlabels", filepath, name, PACKAGE="HDF5Array")
}
h5setdimlabels <- function(filepath, name, dimlabels)
{
    ## Set the "dimension labels" of HDF5 dataset 'name'; 'dimlabels'
    ## must be a character vector. The result is returned invisibly.
    stopifnot(is.character(dimlabels))
    invisible(.Call2("C_h5setdimlabels", filepath, name, dimlabels,
                     PACKAGE="HDF5Array"))
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/bite.R
\name{bite}
\alias{bite}
\title{Body Intake Estimate}
\usage{
bite(file_name = "")
}
\arguments{
\item{file_name}{Name of input .csv file containing additive uses and use level by food code.
See help >package?biteR for input file requirements.
type "amt" or "size" to define what is being estimated: amount of the food consumed or
size of the portion consumed}
}
\value{
3 matrices of population stats containing all strata, one for each: full, female, and male
}
\description{
Given an input file of food codes and additive levels, returns matrices of population intake statistics
based on NHANES dietary survey data.
}
\details{
This function reads an input file of food codes, use, relevant proportion of the food code, and
additive use level and produces population intake statistics for strata, currently hard-coded.
See biteR-package help >package?biteR for details of strata and input file format requirements.
In subsequent versions it is expected that these strata and features of the input file
will be parameterizable by the user.
This function is a wrapper around 3 underlying steps in the BITE process:
param.bite(file_name = ""), raw.bite(x), and pop.bite(x)
This function is for estimating (sub)population average daily intake levels. To estimate
average size(gr) consumed of a food in any eating occasion, please see bite.size().
}
| /man/bite.Rd | no_license | ToxStrategies/biteR | R | false | false | 1,448 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/bite.R
\name{bite}
\alias{bite}
\title{Body Intake Estimate}
\usage{
bite(file_name = "")
}
\arguments{
\item{file_name}{Name of input .csv file containing additive uses and use level by food code.
See help >package?biteR for input file requirements.
type "amt" or "size" to define what is being estimated: amount of the food consumed or
size of the portion consumed}
}
\value{
3 matrices of population stats containing all strata, one for each: full, female, and male
}
\description{
Given an input file of food codes and additive levels, returns matrices of population intake statistics
based on NHANES dietary survey data.
}
\details{
This function reads an input file of food codes, use, relevant proportion of the food code, and
additive use level and produces population intake statistics for strata, currently hard-coded.
See biteR-package help >package?biteR for details of strata and input file format requirements.
In subsequent versions it is expected that these strata and features of the input file
will be parameterizable by the user.
This function is a wrapper around 3 underlying steps in the BITE process:
param.bite(file_name = ""), raw.bite(x), and pop.bite(x)
This function is for estimating (sub)population average daily intake levels. To estimate
average size(gr) consumed of a food in any eating occasion, please see bite.size().
}
|
## The following pair of functions caches the inverse of a matrix.
## This function contains a list with functions to set the value of a matrix, get the value of the matrix, set the value of the inverse, and get the value of the inverse
makeCacheMatrix <- function(x = matrix()) {
    ## Wrap matrix 'x' together with a cached inverse. Returns a list of
    ## four accessors: set/get replace or read the matrix, and
    ## setinverse/getinverse store or read the cached inverse.
    inv <- NULL
    set <- function(y) {
        ## Replacing the matrix invalidates any previously cached inverse.
        x <<- y
        inv <<- NULL
    }
    get <- function() x
    setinverse <- function(solve) inv <<- solve
    getinverse <- function() inv
    list(set = set,
         get = get,
         setinverse = setinverse,
         getinverse = getinverse)
}
## This function calculates the inverse of the matrix created with the above function. It first checks to see if the inverse has already been calculated. If yes, it gets the inverse from the cache and skips the computation. Else, it computes the inverse and sets the value of the inverse in the cache via the setinverse function.
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of the matrix stored in 'x'
    ## (a list produced by makeCacheMatrix). The inverse is computed on
    ## the first call, cached via x$setinverse, and reused afterwards.
    i <- x$getinverse()
    ## BUG FIX: the original wrote if(~is.null(i)); '~' builds a formula,
    ## which is an error when used as an if() condition. The intended
    ## negation operator is '!'.
    if(!is.null(i)) {
        message("Getting cached data")
        return(i)
    }
    data <- x$get()
    i <- solve(data, ...)
    x$setinverse(i)
    i
}
| /cachematrix.R | no_license | izlude83/ProgrammingAssignment2 | R | false | false | 1,113 | r | ## The following pair of functions caches the inverse of a matrix.
## This function contains a list with functions to set the value of a matrix, get the value of the matrix, set the value of the inverse, and get the value of the inverse
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function (y) {
x <<- y
i <<- NULL
}
get <- function () x
setinverse <- function(solve) i <<- solve
getinverse <- function () i
list (set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## This function calculates the inverse of the matrix created with the above function. It first checks to see if the inverse has already been calculated. If yes, it gets the inverse from the cache and skips the computation. Else, it computes the inverse and sets the value of the inverse in the cache via the setinverse function.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinverse()
if(~is.null(i)) {
message("Getting cached data")
return (i)
}
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
i
}
|
# Auto-generated fuzzer fixture: builds a crafted argument list (a 5x6
# 'lims' matrix with extreme values and a 1x1 NA 'points' matrix) and
# feeds it to the internal palm:::pbc_distances entry point.
testlist <- list(lims = structure(c(3.93750549037925e+92, 2.15221254243864e+71, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = 5:6), points = structure(NA_real_, .Dim = c(1L, 1L)))
result <- do.call(palm:::pbc_distances,testlist)
str(result) | /palm/inst/testfiles/pbc_distances/libFuzzer_pbc_distances/pbc_distances_valgrind_files/1612987838-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 289 | r | testlist <- list(lims = structure(c(3.93750549037925e+92, 2.15221254243864e+71, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = 5:6), points = structure(NA_real_, .Dim = c(1L, 1L)))
result <- do.call(palm:::pbc_distances,testlist)
str(result) |
#
# Thomas.R, 17 Mar 18
# Data from:
# Security metrics for computer systems
# Daniel R. Thomas
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
# Load the study-wide ESEUR plotting configuration (defines plot_layout and
# the ESEUR_default_* sizes) and the 'diagram' package used for flow charts.
source("ESEUR_config.r")
library("diagram")
plot_layout(1, 1, default_width=ESEUR_default_width+1,
            default_height=ESEUR_default_height+3)
# Node coordinates: rows of 4, 2, 3, 1, 2 and 1 nodes, top to bottom.
elpos=coordinates (c(4, 2, 3, 1, 2, 1))
# Nodes appear left to right, starting top left, finish bottom right.
# Blank " " entries are invisible placeholder nodes used only for spacing.
names=c(" ", "OpenSSL ", " BouncyCastle\n(176)", " ",
        "Other\nprojects", "Linux",
        " ", "Google", "Hardware\ndeveloper",
        "Device manufacturer\n(402)",
        " ", "Network operator\n(1,650)",
        "Device\n(24,600)")
# Square adjacency matrix indexed by node name: M[from, to] holds the edge
# label ("" draws an unlabelled arrow; 0 means no edge).
M=matrix(data=0, nrow=length(names), ncol=length(names))
colnames(M)=names
rownames(M)=names
# Edges of the Android update chain; the numeric labels come from the
# Thomas data (what each count measures is not shown here -- see the paper).
M["OpenSSL ", "Google"]="52"; M[" BouncyCastle\n(176)", "Google"]="6"
M["Other\nprojects", "Google"]=""; M["Linux", "Google"]="602"
M["Google", "Device manufacturer\n(402)"]="30"; M["Hardware\ndeveloper", "Device manufacturer\n(402)"]=""
M["Hardware\ndeveloper", "Linux"]=""
M["Device manufacturer\n(402)", "Network operator\n(1,650)"]=""
M["Device manufacturer\n(402)", "Device\n(24,600)"]=""
# t(M) presumably because plotmat reads A[i, j] as an arrow from node j to
# node i -- TODO confirm against the diagram::plotmat documentation.
M["Network operator\n(1,650)", "Device\n(24,600)"]="1650"
plotmat(t(M), pos=elpos, lwd=1, arr.lcol="green", arr.pos=0.6, arr.length=0.15, cex=1.2,
        box.lcol="white", box.prop=0.5, box.size=0.05, box.cex=1.2, shadow.size=0)
| /ecosystems/Thomas.R | no_license | alanponce/ESEUR-code-data | R | false | false | 1,367 | r | #
# Thomas.R, 17 Mar 18
# Data from:
# Security metrics for computer systems
# Daniel R. Thomas
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
# Load the study-wide ESEUR plotting configuration (defines plot_layout and
# the ESEUR_default_* sizes) and the 'diagram' package used for flow charts.
source("ESEUR_config.r")
library("diagram")
plot_layout(1, 1, default_width=ESEUR_default_width+1,
            default_height=ESEUR_default_height+3)
# Node coordinates: rows of 4, 2, 3, 1, 2 and 1 nodes, top to bottom.
elpos=coordinates (c(4, 2, 3, 1, 2, 1))
# Nodes appear left to right, starting top left, finish bottom right.
# Blank " " entries are invisible placeholder nodes used only for spacing.
names=c(" ", "OpenSSL ", " BouncyCastle\n(176)", " ",
        "Other\nprojects", "Linux",
        " ", "Google", "Hardware\ndeveloper",
        "Device manufacturer\n(402)",
        " ", "Network operator\n(1,650)",
        "Device\n(24,600)")
# Square adjacency matrix indexed by node name: M[from, to] holds the edge
# label ("" draws an unlabelled arrow; 0 means no edge).
M=matrix(data=0, nrow=length(names), ncol=length(names))
colnames(M)=names
rownames(M)=names
# Edges of the Android update chain; the numeric labels come from the
# Thomas data (what each count measures is not shown here -- see the paper).
M["OpenSSL ", "Google"]="52"; M[" BouncyCastle\n(176)", "Google"]="6"
M["Other\nprojects", "Google"]=""; M["Linux", "Google"]="602"
M["Google", "Device manufacturer\n(402)"]="30"; M["Hardware\ndeveloper", "Device manufacturer\n(402)"]=""
M["Hardware\ndeveloper", "Linux"]=""
M["Device manufacturer\n(402)", "Network operator\n(1,650)"]=""
M["Device manufacturer\n(402)", "Device\n(24,600)"]=""
# t(M) presumably because plotmat reads A[i, j] as an arrow from node j to
# node i -- TODO confirm against the diagram::plotmat documentation.
M["Network operator\n(1,650)", "Device\n(24,600)"]="1650"
plotmat(t(M), pos=elpos, lwd=1, arr.lcol="green", arr.pos=0.6, arr.length=0.15, cex=1.2,
        box.lcol="white", box.prop=0.5, box.size=0.05, box.cex=1.2, shadow.size=0)
|
#' ReconciliationReportService
#'
#' Provides methods for retrieving, submitting and reverting the
#' ReconciliationReport objects.
#' A ReconciliationReport is a group of
#' ReconciliationReportRow objects.
#'
#' getReconciliationReportsByStatement
#'
#' Gets an ReconciliationReportPage of ReconciliationReport objects that satisfy the given Statement query. The following fields are supported for filtering.
#' \itemize{
#' \item{id}
#' \item{status}
#' \item{startDate}
#' }
#'
#' @seealso \href{https://developers.google.com/doubleclick-publishers/docs/reference/v201802/ReconciliationReportService#getReconciliationReportsByStatement}{Google Documentation for getReconciliationReportsByStatement}
#'
#' @param request_data a \code{list} or \code{data.frame} of data elements
#' to be formatted for a SOAP
#' request (XML format, but passed as character string)
#' @param as_df a boolean indicating whether to attempt to parse the result into
#' a \code{data.frame}
#' @param verbose a boolean indicating whether to print the service URL and POSTed XML
#' @return a \code{data.frame} or \code{list} containing all the elements of a getReconciliationReportsByStatementResponse
#' @examples
#' \dontrun{
#' dat <- list(filterStatement=list('query'="WHERE status='ACTIVE'"))
#' res <- dfp_getReconciliationReportsByStatement(dat)
#' }
#' @export
dfp_getReconciliationReportsByStatement <- function(request_data, as_df=TRUE, verbose=FALSE){
  # Assemble the SOAP XML body for this service call.
  soap_body <- form_request_body(service='ReconciliationReportService',
                                 root_name='getReconciliationReportsByStatement',
                                 data=request_data)
  # POST the request, then parse the matching response element.
  response <- execute_soap_request(request_body=soap_body, verbose=verbose)
  parse_soap_response(httr_response=response,
                      resp_element='getReconciliationReportsByStatementResponse',
                      as_df=as_df)
}
#'
#' updateReconciliationReports
#'
#' Updates the specified ReconciliationReport objects.
#'
#' @seealso \href{https://developers.google.com/doubleclick-publishers/docs/reference/v201802/ReconciliationReportService#updateReconciliationReports}{Google Documentation for updateReconciliationReports}
#'
#' @param request_data a \code{list} or \code{data.frame} of data elements
#' to be formatted for a SOAP
#' request (XML format, but passed as character string)
#' @param as_df a boolean indicating whether to attempt to parse the result into
#' a \code{data.frame}
#' @param verbose a boolean indicating whether to print the service URL and POSTed XML
#' @return a \code{data.frame} or \code{list} containing all the elements of a updateReconciliationReportsResponse
#' @examples
#' \dontrun{
#' res <- dfp_updateReconciliationReports(request_data)
#' }
#' @export
dfp_updateReconciliationReports <- function(request_data, as_df=TRUE, verbose=FALSE){
  # Assemble the SOAP XML body for this service call.
  soap_body <- form_request_body(service='ReconciliationReportService',
                                 root_name='updateReconciliationReports',
                                 data=request_data)
  # POST the request, then parse the matching response element.
  response <- execute_soap_request(request_body=soap_body, verbose=verbose)
  parse_soap_response(httr_response=response,
                      resp_element='updateReconciliationReportsResponse',
                      as_df=as_df)
}
#'
| /R/ReconciliationReportService.R | no_license | JhuangSiteAnalytics/rdfp | R | false | false | 3,167 | r | #' ReconciliationReportService
#'
#' Provides methods for retrieving, submitting and reverting the
#' ReconciliationReport objects.
#' A ReconciliationReport is a group of
#' ReconciliationReportRow objects.
#'
#' getReconciliationReportsByStatement
#'
#' Gets an ReconciliationReportPage of ReconciliationReport objects that satisfy the given Statement query. The following fields are supported for filtering.
#' \itemize{
#' \item{id}
#' \item{status}
#' \item{startDate}
#' }
#'
#' @seealso \href{https://developers.google.com/doubleclick-publishers/docs/reference/v201802/ReconciliationReportService#getReconciliationReportsByStatement}{Google Documentation for getReconciliationReportsByStatement}
#'
#' @param request_data a \code{list} or \code{data.frame} of data elements
#' to be formatted for a SOAP
#' request (XML format, but passed as character string)
#' @param as_df a boolean indicating whether to attempt to parse the result into
#' a \code{data.frame}
#' @param verbose a boolean indicating whether to print the service URL and POSTed XML
#' @return a \code{data.frame} or \code{list} containing all the elements of a getReconciliationReportsByStatementResponse
#' @examples
#' \dontrun{
#' dat <- list(filterStatement=list('query'="WHERE status='ACTIVE'"))
#' res <- dfp_getReconciliationReportsByStatement(dat)
#' }
#' @export
dfp_getReconciliationReportsByStatement <- function(request_data, as_df=TRUE, verbose=FALSE){
  # Assemble the SOAP XML body for this service call.
  soap_body <- form_request_body(service='ReconciliationReportService',
                                 root_name='getReconciliationReportsByStatement',
                                 data=request_data)
  # POST the request, then parse the matching response element.
  response <- execute_soap_request(request_body=soap_body, verbose=verbose)
  parse_soap_response(httr_response=response,
                      resp_element='getReconciliationReportsByStatementResponse',
                      as_df=as_df)
}
#'
#' updateReconciliationReports
#'
#' Updates the specified ReconciliationReport objects.
#'
#' @seealso \href{https://developers.google.com/doubleclick-publishers/docs/reference/v201802/ReconciliationReportService#updateReconciliationReports}{Google Documentation for updateReconciliationReports}
#'
#' @param request_data a \code{list} or \code{data.frame} of data elements
#' to be formatted for a SOAP
#' request (XML format, but passed as character string)
#' @param as_df a boolean indicating whether to attempt to parse the result into
#' a \code{data.frame}
#' @param verbose a boolean indicating whether to print the service URL and POSTed XML
#' @return a \code{data.frame} or \code{list} containing all the elements of a updateReconciliationReportsResponse
#' @examples
#' \dontrun{
#' res <- dfp_updateReconciliationReports(request_data)
#' }
#' @export
dfp_updateReconciliationReports <- function(request_data, as_df=TRUE, verbose=FALSE){
  # Assemble the SOAP XML body for this service call.
  soap_body <- form_request_body(service='ReconciliationReportService',
                                 root_name='updateReconciliationReports',
                                 data=request_data)
  # POST the request, then parse the matching response element.
  response <- execute_soap_request(request_body=soap_body, verbose=verbose)
  parse_soap_response(httr_response=response,
                      resp_element='updateReconciliationReportsResponse',
                      as_df=as_df)
}
#'
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{cregt}
\alias{cregt}
\title{Create a Generalization Table}
\usage{
cregt(dat, ori.dat, QI, TA, ...)
}
\arguments{
\item{dat}{\code{qigrp} object}
\item{ori.dat}{Original Data}
\item{QI}{Quasi-Identifiers}
\item{TA}{Target Attribute}
\item{...}{}
}
\value{
Generalization Table
}
\description{
Create a Generalization Table
}
| /man/cregt.Rd | permissive | yongcha/entroPD | R | false | false | 389 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{cregt}
\alias{cregt}
\title{Create a Generalization Table}
\usage{
cregt(dat, ori.dat, QI, TA, ...)
}
\arguments{
\item{dat}{\code{qigrp} object}
\item{ori.dat}{Original Data}
\item{QI}{Quasi-Identifiers}
\item{TA}{Target Attribute}
\item{...}{}
}
\value{
Generalization Table
}
\description{
Create a Generalization Table
}
|
# Title     : R Use Case - Advanced methods with Caret - ADS_5
# Objective : Predicting breast cancer
# Created by: Pascal
# Created on: 08-11-20
# Max Kuhn and Kjell Johnson, Applied Predictive Modeling (book, 2016)
# Ref. http://appliedpredictivemodeling.com/
# Ref. http://topepo.github.io/caret/index.html
getwd()
# "C:/Users/User/Workspace/RprogProjects/AppliedDataScience_5"
# Load the breast cancer data frame (569 rows, 32 columns).
cancer_data <- read.csv("breast_cancer.csv")
cancer_data[1:5,]
dim(cancer_data)
# Exploratory Data Analysis
str(cancer_data)
head(cancer_data)
summary(cancer_data)
# Correlation to 'diagnosis' via scatter plots of matrices (SPLOM),
# split into three column groups to keep the panels readable.
# ref. https://www.rdocumentation.org/packages/psych/versions/2.0.9/topics/pairs.panels
library(psych)
pairs.panels(cancer_data[,c(2,3:10)])
pairs.panels(cancer_data[,c(2,10:20)])
pairs.panels(cancer_data[,c(2,21:32)])
# Principal Components Analysis (PCA) on the standardized feature columns.
# ref. https://www.datacamp.com/community/tutorials/pca-analysis-r
#      http://www.sthda.com/french/articles/38-methodes-des-composantes-principales-dans-r-guide-pratique/79-acp-dans-r-prcomp-vs-princomp/
#      https://www.rdocumentation.org/packages/stats/versions/3.6.2/topics/prcomp
scale_data <- scale(cancer_data[,3:32])
pca_data <- prcomp(scale_data)
str(pca_data); summary(pca_data)
# Scree plot of the principal components.
plot(pca_data, xlab="PCn variables")
# Keep only the first three components PC1, PC2 and PC3.
set_pca_data <- pca_data$x[,1:3]
head(set_pca_data)
# Convert the retained PCs to a data frame (569 rows, 3 columns) and
# append the 'diagnosis' target column.
final_data <- data.frame(set_pca_data)
final_data$diagnosis <- cancer_data$diagnosis
head(final_data)
# SPLOM of the reduced feature set against the target.
pairs.panels(final_data)
# Modelling and predicting: 70/30 train/test split stratified on diagnosis.
# ref. https://www.rdocumentation.org/packages/caret/versions/6.0-86/topics/createDataPartition
library(caret)
inTrain <- createDataPartition(y=final_data$diagnosis, p=0.7, list=FALSE)
training_data<- final_data[inTrain,]
test_data<- final_data[-inTrain,]
dim(training_data); dim(test_data)
table(training_data$diagnosis); table(test_data$diagnosis)
training_data
test_data
# Model building and test with 4 algorithms: bagging (bagFDA),
# boosting (LogitBoost), neural networks (nnet), and
# support vector machines (svmRadialCost).
algoList <- c("bagFDA","LogitBoost","nnet","svmRadialCost")
results <- data.frame(Algorithm=character(), Duration=numeric(), Accuracy=numeric(), stringsAsFactors = FALSE )
for (i in seq_along(algoList)) {
  algo <- algoList[i]
  # BUG FIX: the printed label misspelled "Algorithm" as "Alogrithm".
  print(paste("Algorithm:", algo))
  startTime <- as.integer(Sys.time())
  # Build model with the 'train' function.
  # ref. https://www.rdocumentation.org/packages/caret/versions/4.47/topics/train
  model <- train( diagnosis ~ . , data = training_data , method = algo)
  # Predict against test data. (Removed the original bare 'model' and
  # 'length(...)' expressions: values are not auto-printed inside a loop,
  # so those lines were no-ops.)
  predicted <- predict(model, test_data)
  # Compare diagnosis prediction with test data.
  comp <- confusionMatrix(predicted, as.factor(test_data$diagnosis))
  # Store the result: algorithm name, elapsed seconds, accuracy (%).
  stopTime <- as.integer(Sys.time())
  result <- c( as.character(algo) , stopTime - startTime , as.numeric(comp$overall[1]) )
  print(result)
  results[i,1] <- as.character(algo)
  results[i,2] <- ( stopTime - startTime )
  results[i,3] <- round(as.numeric(comp$overall[1]) * 100, 2)
}
# Print comparison results
results
| /Exercises/advanced_methods_with_caret.R | no_license | PascaL-IT/AppliedDataScience-SS | R | false | false | 3,320 | r | # Title : R Use Case - Advanced methods with Caret – ADS_5
# Objective : Predicting breast cancer
# Created by: Pascal
# Created on: 08-11-20
# Max Kuhn and Kjell Johnson, Applied Predictive Modeling (book, 2016)
# Ref. http://appliedpredictivemodeling.com/
# Ref. http://topepo.github.io/caret/index.html
getwd()
# "C:/Users/User/Workspace/RprogProjects/AppliedDataScience_5"
cancer_data <- read.csv("breast_cancer.csv")
cancer_data[1:5,] # breast cancer data frame
dim(cancer_data) # 569 rows, 32 columns
# Exploratory Data Analysis
str(cancer_data)
head(cancer_data)
summary(cancer_data)
# Correlation to 'diagnosis'
# ref. https://www.rdocumentation.org/packages/psych/versions/2.0.9/topics/pairs.panels
library(psych)
pairs.panels(cancer_data[,c(2,3:10)]) # scatter plot of matrices (SPLOM)
pairs.panels(cancer_data[,c(2,10:20)])
pairs.panels(cancer_data[,c(2,21:32)])
# Principal Components Analysis (PCA)
# ref. https://www.datacamp.com/community/tutorials/pca-analysis-r
# http://www.sthda.com/french/articles/38-methodes-des-composantes-principales-dans-r-guide-pratique/79-acp-dans-r-prcomp-vs-princomp/
# https://www.rdocumentation.org/packages/stats/versions/3.6.2/topics/prcomp
scale_data <- scale(cancer_data[,3:32]) # standardize the 30 numeric features first
pca_data <- prcomp(scale_data)
str(pca_data); summary(pca_data)
# plotting this PCA (scree plot of component variances)
plot(pca_data, xlab="PCn variables")
# use the first three (3) PC1, PC2 and PC3
set_pca_data <- pca_data$x[,1:3]
head(set_pca_data)
#convert pca to data frame (569 rows, 3 columns)
final_data <- data.frame(set_pca_data)
# add the 'diagnostics' column (569 rows)
final_data$diagnosis <- cancer_data$diagnosis
head(final_data)
# scatter plot of matrices (SPLOM)
pairs.panels(final_data)
# Modelling and predicting
# ref. https://www.rdocumentation.org/packages/caret/versions/6.0-86/topics/createDataPartition
#
library(caret)
inTrain <- createDataPartition(y=final_data$diagnosis, p=0.7, list=FALSE)
training_data <- final_data[inTrain,]
test_data <- final_data[-inTrain,]
dim(training_data); dim(test_data)
table(training_data$diagnosis); table(test_data$diagnosis)
training_data
test_data
# Model building and test
# with 4 algorithms : bagging, boosting, neural networks, support vector machines
algoList <- c("bagFDA","LogitBoost","nnet","svmRadialCost")
results <- data.frame(Algorithm=character(), Duration=numeric(), Accuracy=numeric(), stringsAsFactors = FALSE )
# seq_along() is safe even for a zero-length algoList (1:length() would yield c(1, 0))
for (i in seq_along(algoList)) {
  algo <- algoList[i]
  print(paste("Algorithm:", algo)) # fixed typo: message previously read "Alogrithm:"
  startTime <- as.integer(Sys.time())
  # Build model with 'train' function - ref. https://www.rdocumentation.org/packages/caret/versions/4.47/topics/train
  model <- train( diagnosis ~ . , data = training_data , method = algo)
  model
  # Predict against test data
  predicted <- predict(model, test_data)
  length(predicted); length(test_data$diagnosis)
  # Compare diagnosis prediction with test data
  comp <- confusionMatrix(predicted, as.factor(test_data$diagnosis))
  # Store the result (duration in whole seconds, accuracy as a percentage)
  stopTime <- as.integer(Sys.time())
  result <- c( as.character(algo) , stopTime - startTime , as.numeric(comp$overall[1]) )
  print(result)
  results[i,1] <- as.character(algo)
  results[i,2] <- ( stopTime - startTime )
  results[i,3] <- round(as.numeric(comp$overall[1]) * 100, 2)
}
#Print comparison results
results
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_state.R
\name{read_state}
\alias{read_state}
\title{Download shapefiles of Brazilian states as sf objects.}
\usage{
read_state(
code_state = "all",
year = 2010,
tp = "simplified",
showProgress = TRUE
)
}
\arguments{
\item{code_state}{The two-digit code of a state or a two-letter uppercase abbreviation (e.g. 33 or "RJ"). If code_state="all", all states will be loaded.}
\item{year}{Year of the data (defaults to 2010)}
\item{tp}{Whether the function returns the 'original' dataset with high resolution or a dataset with 'simplified' borders (Default)}
\item{showProgress}{Logical. Whether to display a progress bar while downloading. Defaults to TRUE.}
}
\description{
Data at scale 1:250,000, using Geodetic reference system "SIRGAS2000" and CRS(4674)
}
\examples{
\donttest{
library(geobr)
# Read specific state at a given year
uf <- read_state(code_state=12, year=2017)
# Read specific state at a given year
uf <- read_state(code_state="SC", year=2000)
# Read all states at a given year
ufs <- read_state(code_state="all", year=2010)
}
}
\seealso{
Other general area functions:
\code{\link{read_amazon}()},
\code{\link{read_biomes}()},
\code{\link{read_census_tract}()},
\code{\link{read_conservation_units}()},
\code{\link{read_country}()},
\code{\link{read_immediate_region}()},
\code{\link{read_intermediate_region}()},
\code{\link{read_meso_region}()},
\code{\link{read_micro_region}()},
\code{\link{read_municipality}()},
\code{\link{read_region}()},
\code{\link{read_semiarid}()},
\code{\link{read_statistical_grid}()},
\code{\link{read_weighting_area}()}
}
\concept{general area functions}
| /r-package/man/read_state.Rd | no_license | Joaobazzo/geobr | R | false | true | 1,676 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_state.R
\name{read_state}
\alias{read_state}
\title{Download shapefiles of Brazilian states as sf objects.}
\usage{
read_state(
code_state = "all",
year = 2010,
tp = "simplified",
showProgress = TRUE
)
}
\arguments{
\item{code_state}{The two-digit code of a state or a two-letter uppercase abbreviation (e.g. 33 or "RJ"). If code_state="all", all states will be loaded.}
\item{year}{Year of the data (defaults to 2010)}
\item{tp}{Whether the function returns the 'original' dataset with high resolution or a dataset with 'simplified' borders (Default)}
\item{showProgress}{Logical. Whether to display a progress bar while downloading. Defaults to TRUE.}
}
\description{
Data at scale 1:250,000, using Geodetic reference system "SIRGAS2000" and CRS(4674)
}
\examples{
\donttest{
library(geobr)
# Read specific state at a given year
uf <- read_state(code_state=12, year=2017)
# Read specific state at a given year
uf <- read_state(code_state="SC", year=2000)
# Read all states at a given year
ufs <- read_state(code_state="all", year=2010)
}
}
\seealso{
Other general area functions:
\code{\link{read_amazon}()},
\code{\link{read_biomes}()},
\code{\link{read_census_tract}()},
\code{\link{read_conservation_units}()},
\code{\link{read_country}()},
\code{\link{read_immediate_region}()},
\code{\link{read_intermediate_region}()},
\code{\link{read_meso_region}()},
\code{\link{read_micro_region}()},
\code{\link{read_municipality}()},
\code{\link{read_region}()},
\code{\link{read_semiarid}()},
\code{\link{read_statistical_grid}()},
\code{\link{read_weighting_area}()}
}
\concept{general area functions}
|
# Plotting functions
# Line chart of monthly victim counts for a single municipality.
gerar_grafico_serie <- function(tab, municipio_nome){
  dados_filtrados <- tab %>%
    filter(municipio == municipio_nome)
  dados_filtrados %>%
    ggplot() +
    geom_line(aes(x = mes_ano, y = vitimas)) +
    labs(
      x = "Mês",
      y = "Número de vítimas",
      title = municipio_nome
    ) +
    theme_minimal()
}
# Plot the monthly time series of victim counts for one municipality.
# Assumes `tab` has columns municipio, mes_ano, vitimas, and that dplyr and
# ggplot2 are attached by the caller — TODO confirm against the project setup.
gerar_grafico_serie <- function(tab, municipio_nome){
tab %>%
filter(municipio== municipio_nome) %>%
ggplot() +
geom_line(aes(x = mes_ano, y = vitimas)) +
labs(
x = "Mês",
y = "Número de vítimas",
title = municipio_nome
) +
theme_minimal()
}
\name{summary.front41Output}
\alias{summary.front41Output}
\alias{print.summary.front41Output}
\title{Summarizing the Estimation of Frontier 4.1}
\description{
\code{summary.front41Output} summarizes the estimation results
of a model estimated by Frontier 4.1.
}
\usage{
\method{summary}{front41Output}( object, \dots )
\method{print}{summary.front41Output}( x, efficiencies = FALSE, \dots )
}
\arguments{
\item{object}{an object of class \code{front41Output}
(read/created by \code{\link{front41ReadOutput}}.}
\item{x}{object of class \code{summary.front41Output}
(returned by the \code{summary} method for objects of class
\code{front41ReadOutput}).}
\item{efficiencies}{logical. Print all efficiency estimates?
(If \code{FALSE}, only the mean efficiency is printed.)}
\item{\dots}{currently ignored.}
}
\value{
The \code{summary} method returns a list of class
\code{summary.front41Output}
with the same elements as object returned by \code{\link{front41ReadOutput}}.
However, the elements \code{olsResults}, \code{gridResults}, and
\code{mleResults} have an additional column with marginal significance
levels (P values).
The \eqn{P} values of the OLS estimates are calculated using
the \eqn{t} distribution,
while the (asymptotic) \eqn{P} values of the ML estimates
are calculated based on the assumption
that their \eqn{t} values follow an (asymptotic) standard normal
distribution.
}
\seealso{\code{\link{front41ReadOutput}}, \code{\link{front41WriteInput}}.}
\author{Arne Henningsen}
\examples{
# read the output file that is provided with Frontier 4.1
outFile <- system.file( "front41/EG1.OUT", package = "frontier" )
sfa <- front41ReadOutput( outFile )
summary( sfa )
}
\keyword{models}
| /man/summary.front41Output.Rd | no_license | cran/frontier | R | false | false | 1,797 | rd | \name{summary.front41Output}
\alias{summary.front41Output}
\alias{print.summary.front41Output}
\title{Summarizing the Estimation of Frontier 4.1}
\description{
\code{summary.front41Output} summarizes the estimation results
of a model estimated by Frontier 4.1.
}
\usage{
\method{summary}{front41Output}( object, \dots )
\method{print}{summary.front41Output}( x, efficiencies = FALSE, \dots )
}
\arguments{
\item{object}{an object of class \code{front41Output}
(read/created by \code{\link{front41ReadOutput}}.}
\item{x}{object of class \code{summary.front41Output}
(returned by the \code{summary} method for objects of class
\code{front41ReadOutput}).}
\item{efficiencies}{logical. Print all efficiency estimates?
(If \code{FALSE}, only the mean efficiency is printed.)}
\item{\dots}{currently ignored.}
}
\value{
The \code{summary} method returns a list of class
\code{summary.front41Output}
with the same elements as object returned by \code{\link{front41ReadOutput}}.
However, the elements \code{olsResults}, \code{gridResults}, and
\code{mleResults} have an additional column with marginal significance
levels (P values).
The \eqn{P} values of the OLS estimates are calculated using
the \eqn{t} distribution,
while the (asymptotic) \eqn{P} values of the ML estimates
are calculated based on the assumption
that their \eqn{t} values follow an (asymptotic) standard normal
distribution.
}
\seealso{\code{\link{front41ReadOutput}}, \code{\link{front41WriteInput}}.}
\author{Arne Henningsen}
\examples{
# read the output file that is provided with Frontier 4.1
outFile <- system.file( "front41/EG1.OUT", package = "frontier" )
sfa <- front41ReadOutput( outFile )
summary( sfa )
}
\keyword{models}
|
#' Stream Power
#'
#' This function determines the power of a stream
#'
#' @param Q stream discharge (m^3/s); may be a vector
#' @param S channel slope; may be a vector
#' @param rho water density (g/m^3) default = 1000
#' @param g acceleration due to gravity (m/s^2) default = 9.8
#'
#' @return stream power (W)
# Create stream power function
stream_power = function(Q, S, rho=1000, g=9.8) {
  # Signal invalid input with stop() instead of returning a character message.
  # (The previous ifelse(..., return("<message>"), ...) pattern returned a
  # string callers could mistake for a result, and aborted the whole call as
  # soon as any element of a vector argument was negative.)
  if (any(Q < 0)) stop("discharge must be greater than zero")
  if (any(S < 0)) stop("channel slope must be greater than zero")
  if (rho < 0) stop("rho must be greater than zero")
  # stream power = rho * g * Q * S, vectorized over Q and S
  rho * g * Q * S
}
| /R/stream_power.R | no_license | lauraing/streampower | R | false | false | 760 | r | #' Stream Power
#'
#' This function determines the power of a stream
#'
#' @param Q stream discharge (m^3/s)
#' @param S channel slope
#' @param rho water density (g/m^3) default = 1000
#' @param g acceleration due to gravity (m/s^2) default = 9.8
#'
#' @return stream power (W), or a character message when an input is negative
# Create stream power function
# NOTE(review): the ifelse(..., return("<message>"), ...) calls below return a
# plain character string (not an error) as soon as any element is negative,
# abandoning the whole computation even for vectors with mixed signs.
# Consider stop() instead — confirm no caller depends on the string return.
stream_power = function(Q, S, rho=1000, g=9.8) {
# if inputs are negative, return message "[input] must be greater than zero" using two different kinds of error checking
Q = ifelse((Q < 0), return("discharge must be greater than zero"), Q)
S = ifelse((S < 0), return("channel slope must be greater than zero"), S)
if (rho < 0) return("rho must be greater than zero")
# calculate stream power
result = rho * g * Q * S
return(result)
}
|
ginfo<-function(what=NULL,obj=NULL) {
if (GAMLj_INFO) {
if (!is.null(what))
print(what)
if (!is.null(obj)) {
print(obj)
cat("------------\n")
}
}
}
mark<-function(what=NULL,obj=NULL) {
if (GAMLj_DEBUG) {
if (!is.null(what))
print(what)
else print("you got here")
if (!is.null(obj)) {
print(obj)
print("#### end ###")
}
}
}
.listdeep<-function(aList,n=0) {
if (!inherits(aList,"list"))
return(n)
max(sapply(aList,.listdeep,n+1))
}
.keepShape<-function(mat) {
if (is.null(dim(mat)))
mat<-t(as.matrix(mat))
mat
} | /R/functions.R | no_license | dustinfife/gamlj | R | false | false | 611 | r |
ginfo<-function(what=NULL,obj=NULL) {
if (GAMLj_INFO) {
if (!is.null(what))
print(what)
if (!is.null(obj)) {
print(obj)
cat("------------\n")
}
}
}
mark<-function(what=NULL,obj=NULL) {
if (GAMLj_DEBUG) {
if (!is.null(what))
print(what)
else print("you got here")
if (!is.null(obj)) {
print(obj)
print("#### end ###")
}
}
}
.listdeep<-function(aList,n=0) {
if (!inherits(aList,"list"))
return(n)
max(sapply(aList,.listdeep,n+1))
}
.keepShape<-function(mat) {
if (is.null(dim(mat)))
mat<-t(as.matrix(mat))
mat
} |
## Build a tidy per-subject/per-activity summary of the UCI HAR dataset.
# melt()/acast() below come from the reshape2 package, which this script
# never loaded — without this call the final summary step fails.
library(reshape2)
##Load the test data
setwd("~/Google ドライブ/Git/datasciencecoursera/UCI HAR Dataset/test")
x <- read.table("X_test.txt")        # measurements (test)
y <- read.table("y_test.txt")        # activity codes (test)
s <- read.table("subject_test.txt")  # subject ids (test)
##Load the training data
setwd("~/Google ドライブ/Git/datasciencecoursera/UCI HAR Dataset/train")
xtr <- read.table("X_train.txt")
ytr <- read.table("y_train.txt")
subj_tr <- read.table("subject_train.txt") # renamed from `str` so utils::str() is not masked
#1. Merge the test/training data
xdf <- rbind(x, xtr) #measurement
ydf <- rbind(y, ytr) #activity
sdf <- rbind(s, subj_tr) #subject
##Get feature names
setwd("~/Google ドライブ/Git/datasciencecoursera/UCI HAR Dataset")
fd <- read.table("features.txt")
as.character(fd[,2])      # inspect feature names (printed, not stored)
##Get activity names
actname <- read.table("activity_labels.txt")
as.character(actname[,2]) # inspect activity names (printed, not stored)
#2. Extract the mean/std measurement columns
selected <- grep("-(mean|std).*",fd[,2])
#4. Appropriately label the data with descriptive variable names
##Select the variable name columns
selectedColN <- fd[selected,2]
#Delete the symbols and replace mean/std with Mean/Std
selectedColN <- gsub("[()-]","",selectedColN)
selectedColN <- gsub("mean","Mean",selectedColN)
selectedColN <- gsub("std","Std",selectedColN)
#3. Name the activity in the data
##Merge the selected columns, subject, and activity
alldata <- cbind(sdf, ydf, xdf[selected])
names(alldata) <- make.names(c("subject", "activity", selectedColN))
##Replace the activity number with the activity names
alldata$activity <- factor(alldata$activity, levels = actname[,1], labels = actname[,2])
#5. Calculate the mean of each variable for each activity and subject
alld <- melt(alldata, id.vars = c("subject","activity"))
td <- acast(alld, subject + activity ~ variable, mean)
#5. Write a tidy data set as csv (use FALSE, not the reassignable alias F)
write.csv(td, file = "tidydata.csv", quote = FALSE)
| /run_analysis.R | no_license | Na1929/W4_Project | R | false | false | 1,786 | r | ##Load the test data
# melt()/acast() below come from the reshape2 package, which this script
# never loaded — without this call the final summary step fails.
library(reshape2)
setwd("~/Google ドライブ/Git/datasciencecoursera/UCI HAR Dataset/test")
x <- read.table("X_test.txt")        # measurements (test)
y <- read.table("y_test.txt")        # activity codes (test)
s <- read.table("subject_test.txt")  # subject ids (test)
##Load the training data
setwd("~/Google ドライブ/Git/datasciencecoursera/UCI HAR Dataset/train")
xtr <- read.table("X_train.txt")
ytr <- read.table("y_train.txt")
subj_tr <- read.table("subject_train.txt") # renamed from `str` so utils::str() is not masked
#1. Merge the test/training data
xdf <- rbind(x, xtr) #measurement
ydf <- rbind(y, ytr) #activity
sdf <- rbind(s, subj_tr) #subject
##Get feature names
setwd("~/Google ドライブ/Git/datasciencecoursera/UCI HAR Dataset")
fd <- read.table("features.txt")
as.character(fd[,2])      # inspect feature names (printed, not stored)
##Get activity names
actname <- read.table("activity_labels.txt")
as.character(actname[,2]) # inspect activity names (printed, not stored)
#2. Extract the mean/std measurement columns
selected <- grep("-(mean|std).*",fd[,2])
#4. Appropriately label the data with descriptive variable names
##Select the variable name columns
selectedColN <- fd[selected,2]
#Delete the symbols and replace mean/std with Mean/Std
selectedColN <- gsub("[()-]","",selectedColN)
selectedColN <- gsub("mean","Mean",selectedColN)
selectedColN <- gsub("std","Std",selectedColN)
#3. Name the activity in the data
##Merge the selected columns, subject, and activity
alldata <- cbind(sdf, ydf, xdf[selected])
names(alldata) <- make.names(c("subject", "activity", selectedColN))
##Replace the activity number with the activity names
alldata$activity <- factor(alldata$activity, levels = actname[,1], labels = actname[,2])
#5. Calculate the mean of each variable for each activity and subject
alld <- melt(alldata, id.vars = c("subject","activity"))
td <- acast(alld, subject + activity ~ variable, mean)
#5. Write a tidy data set as csv (use FALSE, not the reassignable alias F)
write.csv(td, file = "tidydata.csv", quote = FALSE)
|
B <- 36 # size of filename alphabet

# Closed-form sum of nchar(filename_for_index(0..(n-1))).
#
# Trick: pretend index 0 carries a zero-length filename and shift every other
# index up by one; the total length is unchanged and the counting becomes
# regular. Among the first n (shifted) filenames, all but (36^i - 1)/35 of
# them (a base-36 repunit) are at least i bytes long, so with
#   d = floor(log_36(n / (36/35))) + 1
# (the largest filename length that actually occurs) the total is
#   sum_{i=1..d} [n - (36^i - 1)/35]  =  d*n - ((36^d - 1)*36/35^2 - d/35),
# the last step using the closed form for sums of repunits
# (https://oeis.org/A014824).
sum_filename_lengths <- function(n) {
  shifted <- n + 1 # include the fictitious zero-length filename at index 0
  base1 <- B - 1
  # Largest filename length occurring among the first `shifted` names.
  d <- floor(log(shifted / (B / base1), B)) + 1
  d * shifted - ((B^d - 1) / base1 * B / base1 - d / base1)
}
# Calculates sum(nchar(filename_for_index(i)) * i) for i in 0..(n-1), but in a
# faster way (O(log n)) than just doing the sum.
# One loop iteration handles the whole group of filenames sharing length m,
# so the loop runs O(log_B n) times. Wrapped in Vectorize() so vector-valued
# n is handled elementwise.
triangular_sum_filename_lengths <- Vectorize(function(n) {
s <- 0
m <- 1
r <- -1
# s is the running sum. m is the current filename length (starts at 1).
# r keeps track of the starting point for weighting the summation in
# each iteration; it starts at -1, not 0, because we are weighting the
# filenames by 0:(n-1), not 1:n.
while (n > 0) {
# x is the number of filenames of length m: either B^m, or
# however many remain.
x <- min(n, B^m)
# x * (x+1)/2 is 1 + 2 + ... + x.
# x * ((x+1)/2 + r) is (r+1) + (r+2) + ... + (r+x).
s <- s + m * x * ((x + 1) / 2 + r)
# We've handled all x filenames of length m.
n <- n - x
# Next iteration, offset weights by the number of filenames we
# have just processed.
r <- r + x
# Next filename length.
m <- m + 1
}
s
})
# Return the element of `range` at which the (vectorized) objective f is
# largest; ties resolve to the first maximum, as which.max does.
argmax_fn <- function(range, f) {
  scores <- f(range)
  range[[which.max(scores)]]
}
# Find the smallest integer in [low, high] for which the monotone predicate f
# returns TRUE, or NA if f is FALSE everywhere. high = NA means "unbounded
# above": an upper bound is located first by doubling the probe point.
bsearch_fn <- function(low, high, f) {
  low <- floor(low)
  # BUG FIX: the rounded upper bound was assigned to a misspelled variable
  # (`hight`) and silently discarded; assign it to `high` as intended.
  high <- ceiling(high)
  while (is.na(high) || low < high) {
    if (is.na(high)) {
      # Exponential probe upward until the predicate first holds.
      mid <- low * 2
    } else {
      mid <- floor((low + high) / 2)
    }
    if (!f(mid)) {
      low <- mid + 1
    } else {
      high <- mid
    }
  }
  # f(low) may still be FALSE when the predicate never holds in range.
  ifelse(f(low), low, NA)
}
# Elementwise minimum of each element of v with a scalar bound
# (i.e. clamp v from above at `bound`).
cmin <- function(v, bound) {
  sapply(v, min, bound)
}
# Elementwise maximum of each element of v with a scalar bound
# (i.e. clamp v from below at `bound`).
cmax <- function(v, bound) {
  sapply(v, max, bound)
}
# Fixed ZIP record sizes in bytes (excluding variable-length filename and
# extra-field data, which are accounted for separately).
lfh <- 30 # size of a local file header
cdh <- 46 # size of a central directory header
eocd <- 22 # size of an end of central directory record
quot_ext <- 4 # size of an extra field tag/length header (for quoting)
quot_def <- 5 # size of a DEFLATE non-compressed block header (for quoting)
lfh_zip64 <- lfh+20 # local file header plus Zip64 extra-field bytes
cdh_zip64 <- cdh+12 # central directory header plus Zip64 extra-field bytes
eocd_zip64 <- eocd+56+20 # Zip64 end of central directory record and end of central directory locator
# See how many files we can quote using extra fields rather than DEFLATE
# non-compressed blocks. 65535 is the 16-bit ceiling on the relevant length
# field; bsearch_fn finds the first file count that overflows it, so
# subtracting 1 gives the largest count that still fits.
max_num_quot_ext <- bsearch_fn(1, NA, function(n) {
n*lfh + (n-1)*quot_ext + sum_filename_lengths(n) > 65535
}) - 1
# Same bound computed with the larger Zip64 local file headers.
max_num_quot_ext_zip64 <- bsearch_fn(1, NA, function(n) {
n*lfh_zip64 + (n-1)*quot_ext + sum_filename_lengths(n) > 65535
}) - 1
DEFLATE_uncompressed_size_given_compressed_size <- function(compressed_size) {
  # bulk_deflate specifics: the 16-byte prefix+suffix pair alone stands for
  # 1033 uncompressed bytes (1+258+258 in the prefix, 258+258 in the suffix);
  # every additional compressed byte contributes 1032 uncompressed bytes.
  extra <- compressed_size - 16
  extra * 1032 + 1033
}
DEFLATE_compressed_size_given_uncompressed_size <- function(uncompressed_size) {
  # 15-byte prefix and 1 literal byte, then one compressed byte for every
  # further 1032 uncompressed bytes (rounded up); inverse of the function above.
  body_bytes <- ceiling((uncompressed_size - 1 - 1032) / 1032)
  15 + 1 + body_bytes
}
DEFLATE_uncompressed_size_given_max_uncompressed_size <- function(max_uncompressed_size) {
  # bulk_deflate lands within 258 bytes of the bound: after the single leading
  # literal byte, output grows in 258-byte steps, so drop the remainder.
  remainder <- (max_uncompressed_size - 1) %% 258
  max_uncompressed_size - remainder
}
# BZIP2 size bookkeeping: a fixed 4-byte header and 10-byte footer surround a
# payload in which each 32-byte unit accounts for 45899235 uncompressed bytes.
BZIP2_uncompressed_size_given_compressed_size <- function(compressed_size) {
  payload <- compressed_size - 4 - 10
  payload * 45899235 / 32
}
# Largest usable compressed size not exceeding the bound (whole 32-byte units).
BZIP2_compressed_size_given_max_compressed_size <- function(max_compressed_size) {
  whole_units <- (max_compressed_size - 4 - 10) %/% 32
  4 + whole_units * 32 + 10
}
# Largest achievable uncompressed size not exceeding the bound.
# NOTE(review): the parameter is named max_compressed_size but, going by the
# function name and the 45899235 divisor, appears to hold an uncompressed
# byte count — confirm against callers.
BZIP2_uncompressed_size_given_max_uncompressed_size <- function(max_compressed_size) {
  (max_compressed_size %/% 45899235) * 45899235
}
BZIP2_compressed_size_given_uncompressed_size <- function(uncompressed_size) {
  uncompressed_size * 32 / 45899235 + 4 + 10
}
BZIP2_compressed_size_given_max_uncompressed_size <- function(max_uncompressed_size) {
  full_blocks <- max_uncompressed_size %/% 45899235
  4 + full_blocks * 32 + 10
}
## no overlap construction
# Every entry carries its own local file header, central directory header, and
# its own copy of the compressed kernel; the filename is stored twice per
# entry (once in the LFH, once in the CDH), hence the 2*sum_filename_lengths.
NONE_headers_size <- function(num_files) {
num_files*(lfh+cdh) + 2*sum_filename_lengths(num_files) + eocd
}
# Archive size: one kernel copy per file plus all header overhead.
NONE_zipped_size_given_compressed_size <- function(compressed_size, num_files) {
compressed_size*num_files + NONE_headers_size(num_files)
}
# Extracted size: each file expands to the same uncompressed payload.
NONE_unzipped_size_given_uncompressed_size <- function(uncompressed_size, num_files) {
num_files*uncompressed_size
}
NONE_DEFLATE_unzipped_size_given_compressed_size <- function(compressed_size, num_files) {
uncompressed_size <- DEFLATE_uncompressed_size_given_compressed_size(compressed_size)
NONE_unzipped_size_given_uncompressed_size(uncompressed_size, num_files)
}
NONE_BZIP2_unzipped_size_given_compressed_size <- function(compressed_size, num_files) {
uncompressed_size <- BZIP2_uncompressed_size_given_compressed_size(compressed_size)
NONE_unzipped_size_given_uncompressed_size(uncompressed_size, num_files)
}
# Zip64 variants of the above (larger per-entry headers and EOCD records).
NONE_64_headers_size <- function(num_files) {
num_files*(lfh_zip64+cdh_zip64) + 2*sum_filename_lengths(num_files) + eocd_zip64
}
NONE_64_zipped_size_given_compressed_size <- function(compressed_size, num_files) {
compressed_size*num_files + NONE_64_headers_size(num_files)
}
## full overlap construction
# A single local file header and a single kernel copy are shared by all
# central directory entries, so only one LFH (and one LFH filename) is paid
# for regardless of num_files.
FULL_headers_size <- function(num_files) {
lfh + num_files*cdh + sum_filename_lengths(1) + sum_filename_lengths(num_files) + eocd
}
# Archive size: one shared kernel plus the header overhead above.
FULL_zipped_size_given_compressed_size <- function(compressed_size, num_files) {
compressed_size + FULL_headers_size(num_files)
}
# Extracted size: every entry still expands to the full uncompressed payload.
FULL_unzipped_size_given_uncompressed_size <- function(uncompressed_size, num_files) {
num_files*uncompressed_size
}
FULL_DEFLATE_unzipped_size_given_compressed_size <- function(compressed_size, num_files) {
uncompressed_size <- DEFLATE_uncompressed_size_given_compressed_size(compressed_size)
FULL_unzipped_size_given_uncompressed_size(uncompressed_size, num_files)
}
FULL_BZIP2_unzipped_size_given_compressed_size <- function(compressed_size, num_files) {
uncompressed_size <- BZIP2_uncompressed_size_given_compressed_size(compressed_size)
FULL_unzipped_size_given_uncompressed_size(uncompressed_size, num_files)
}
# Zip64 variants (larger headers).
FULL_64_headers_size <- function(num_files) {
lfh_zip64 + num_files*cdh_zip64 + sum_filename_lengths(1) + sum_filename_lengths(num_files) + eocd_zip64
}
FULL_64_zipped_size_given_compressed_size <- function(compressed_size, num_files) {
compressed_size + FULL_64_headers_size(num_files)
}
## quoted overlap construction
# Headers of later files are "quoted" inside earlier ones — up to
# max_num_quot_ext of them via extra fields (when extra_quoting is TRUE),
# the remainder via 5-byte DEFLATE non-compressed block headers.
QUOTED_headers_size <- function(num_files, extra_quoting) {
num_quot_ext <- if (extra_quoting) cmin(num_files - 1, max_num_quot_ext) else 0
num_quot_def <- cmax(num_files - 1 - num_quot_ext, 0)
num_files*(lfh+cdh) + num_quot_ext*quot_ext + num_quot_def*quot_def + 2*sum_filename_lengths(num_files) + eocd
}
QUOTED_zipped_size_given_compressed_size <- function(compressed_size, num_files, extra_quoting=FALSE) {
compressed_size + QUOTED_headers_size(num_files, extra_quoting)
}
# Extracted size: besides the payload, DEFLATE-quoted headers (and their
# filenames) come back out as data; the triangular sums weight each quoted
# filename by how many outputs it appears in.
QUOTED_unzipped_size_given_uncompressed_size <- function(uncompressed_size, num_files, extra_quoting) {
num_quot_ext <- if (extra_quoting) cmin(num_files - 1, max_num_quot_ext) else 0
num_quot_def <- cmax(num_files - 1 - num_quot_ext, 0)
num_files*uncompressed_size + num_quot_ext*num_quot_def*lfh + num_quot_def*(num_quot_def+1)/2*lfh + triangular_sum_filename_lengths(num_files) - triangular_sum_filename_lengths(num_quot_ext+1)
}
QUOTED_DEFLATE_unzipped_size_given_compressed_size <- function(compressed_size, num_files, extra_quoting=FALSE) {
uncompressed_size <- DEFLATE_uncompressed_size_given_compressed_size(compressed_size)
QUOTED_unzipped_size_given_uncompressed_size(uncompressed_size, num_files, extra_quoting)
}
QUOTED_DEFLATE_zipped_size_given_max_uncompressed_size <- function(max_uncompressed_size, num_files, extra_quoting=FALSE) {
uncompressed_size <- DEFLATE_uncompressed_size_given_max_uncompressed_size(max_uncompressed_size)
compressed_size <- DEFLATE_compressed_size_given_uncompressed_size(uncompressed_size)
QUOTED_zipped_size_given_compressed_size(compressed_size, num_files, extra_quoting)
}
QUOTED_DEFLATE_unzipped_size_given_max_uncompressed_size <- function(max_uncompressed_size, num_files, extra_quoting=FALSE) {
uncompressed_size <- DEFLATE_uncompressed_size_given_max_uncompressed_size(max_uncompressed_size)
QUOTED_unzipped_size_given_uncompressed_size(uncompressed_size, num_files, extra_quoting)
}
# BZIP2 quoting only works with extra-field quoting, hence the guard.
# NOTE(review): stop() is called with no message here — a descriptive message
# would make the failure easier to diagnose.
QUOTED_BZIP2_unzipped_size_given_compressed_size <- function(compressed_size, num_files, extra_quoting=FALSE) {
if (!extra_quoting)
stop()
uncompressed_size <- BZIP2_uncompressed_size_given_compressed_size(compressed_size)
QUOTED_unzipped_size_given_uncompressed_size(uncompressed_size, num_files, extra_quoting)
}
# Zip64 variants: identical structure with the larger headers and the
# zip64-specific extra-field quoting limit.
QUOTED_64_headers_size <- function(num_files, extra_quoting) {
num_quot_ext <- if (extra_quoting) cmin(num_files - 1, max_num_quot_ext_zip64) else 0
num_quot_def <- cmax(num_files - 1 - num_quot_ext, 0)
num_files*(lfh_zip64+cdh_zip64) + num_quot_ext*quot_ext + num_quot_def*quot_def + 2*sum_filename_lengths(num_files) + eocd_zip64
}
QUOTED_64_zipped_size_given_compressed_size <- function(compressed_size, num_files, extra_quoting=FALSE) {
compressed_size + QUOTED_64_headers_size(num_files, extra_quoting)
}
QUOTED_64_unzipped_size_given_uncompressed_size <- function(uncompressed_size, num_files, extra_quoting) {
num_quot_ext <- if (extra_quoting) cmin(num_files - 1, max_num_quot_ext_zip64) else 0
num_quot_def <- cmax(num_files - 1 - num_quot_ext, 0)
num_files*uncompressed_size + num_quot_ext*num_quot_def*lfh_zip64 + num_quot_def*(num_quot_def+1)/2*lfh_zip64 + triangular_sum_filename_lengths(num_files) - triangular_sum_filename_lengths(num_quot_ext+1)
}
QUOTED_DEFLATE_64_unzipped_size_given_compressed_size <- function(compressed_size, num_files, extra_quoting=FALSE) {
uncompressed_size <- DEFLATE_uncompressed_size_given_compressed_size(compressed_size)
QUOTED_64_unzipped_size_given_uncompressed_size(uncompressed_size, num_files, extra_quoting)
}
QUOTED_BZIP2_64_unzipped_size_given_compressed_size <- function(compressed_size, num_files, extra_quoting=FALSE) {
if (!extra_quoting)
stop()
uncompressed_size <- BZIP2_uncompressed_size_given_compressed_size(compressed_size)
QUOTED_64_unzipped_size_given_uncompressed_size(uncompressed_size, num_files, extra_quoting)
}
QUOTED_BZIP2_zipped_size_given_max_uncompressed_size <- function(max_uncompressed_size, num_files, extra_quoting=FALSE) {
if (!extra_quoting)
stop()
compressed_size <- BZIP2_compressed_size_given_max_uncompressed_size(max_uncompressed_size)
QUOTED_zipped_size_given_compressed_size(compressed_size, num_files, extra_quoting)
}
QUOTED_BZIP2_unzipped_size_given_max_uncompressed_size <- function(max_uncompressed_size, num_files, extra_quoting=FALSE) {
if (!extra_quoting)
stop()
compressed_size <- BZIP2_compressed_size_given_max_uncompressed_size(max_uncompressed_size)
QUOTED_BZIP2_unzipped_size_given_compressed_size(compressed_size, num_files, extra_quoting)
}
## optimization
# Each optimizer scans candidate file counts with argmax_fn, maximizing the
# ratio of unzipped size to zipped size (the "compression bomb" ratio), and
# returns the winning kernel size and file count as a list.
# NOTE(review): the 1:(bound) candidate ranges assume the bound is >= 1;
# a zipped_size smaller than cdh would produce a decreasing sequence.
FULL_DEFLATE_optimize_for_zipped_size <- function(zipped_size) {
num_files_opt <- argmax_fn(1:(zipped_size/cdh), function(num_files) {
compressed_size <- zipped_size - FULL_headers_size(num_files)
FULL_DEFLATE_unzipped_size_given_compressed_size(compressed_size, num_files) / FULL_zipped_size_given_compressed_size(compressed_size, num_files)
})
compressed_size_opt <- zipped_size - FULL_headers_size(num_files_opt)
list(compressed_size=compressed_size_opt, num_files=num_files_opt)
}
FULL_DEFLATE_64_optimize_for_zipped_size <- function(zipped_size) {
num_files_opt <- argmax_fn(1:(zipped_size/cdh), function(num_files) {
compressed_size <- zipped_size - FULL_64_headers_size(num_files)
FULL_DEFLATE_unzipped_size_given_compressed_size(compressed_size, num_files) / FULL_64_zipped_size_given_compressed_size(compressed_size, num_files)
})
compressed_size_opt <- zipped_size - FULL_64_headers_size(num_files_opt)
list(compressed_size=compressed_size_opt, num_files=num_files_opt)
}
# BZIP2 kernels only come in whole 32-byte payload units, so these take a
# *maximum* zipped size and round the kernel down to what fits.
FULL_BZIP2_optimize_for_max_zipped_size <- function(max_zipped_size) {
num_files_opt <- argmax_fn(1:(max_zipped_size/cdh), function(num_files) {
compressed_size <- BZIP2_compressed_size_given_max_compressed_size(max_zipped_size - FULL_headers_size(num_files))
FULL_BZIP2_unzipped_size_given_compressed_size(compressed_size, num_files) / FULL_zipped_size_given_compressed_size(compressed_size, num_files)
})
compressed_size_opt <- BZIP2_compressed_size_given_max_compressed_size(max_zipped_size - FULL_headers_size(num_files_opt))
list(compressed_size=compressed_size_opt, num_files=num_files_opt)
}
FULL_BZIP2_64_optimize_for_max_zipped_size <- function(max_zipped_size) {
num_files_opt <- argmax_fn(1:(max_zipped_size/cdh), function(num_files) {
compressed_size <- BZIP2_compressed_size_given_max_compressed_size(max_zipped_size - FULL_64_headers_size(num_files))
FULL_BZIP2_unzipped_size_given_compressed_size(compressed_size, num_files) / FULL_64_zipped_size_given_compressed_size(compressed_size, num_files)
})
compressed_size_opt <- BZIP2_compressed_size_given_max_compressed_size(max_zipped_size - FULL_64_headers_size(num_files_opt))
list(compressed_size=compressed_size_opt, num_files=num_files_opt)
}
# Quoted-overlap variant: the candidate bound also accounts for the per-file
# quoting overhead (quot_ext) in addition to the two header records.
QUOTED_DEFLATE_optimize_for_zipped_size <- function(zipped_size, extra_quoting=FALSE) {
num_files_opt <- argmax_fn(1:(zipped_size/(lfh+cdh+quot_ext)), function(num_files) {
compressed_size <- zipped_size - QUOTED_headers_size(num_files, extra_quoting)
QUOTED_DEFLATE_unzipped_size_given_compressed_size(compressed_size, num_files, extra_quoting) / QUOTED_zipped_size_given_compressed_size(compressed_size, num_files, extra_quoting)
})
compressed_size_opt <- zipped_size - QUOTED_headers_size(num_files_opt, extra_quoting)
list(compressed_size=compressed_size_opt, num_files=num_files_opt)
}
# Given a fixed file count, find the per-file uncompressed-size cap that
# maximizes the compression ratio without requiring Zip64. Returns
# list(max_uncompressed_size=, num_files=).
QUOTED_DEFLATE_optimize_for_num_files <- function(num_files, extra_quoting=FALSE) {
    # Split quoting between extra fields (cheap, limited to max_num_quot_ext)
    # and DEFLATE non-compressed blocks (the remainder).
    num_quot_ext <- if (extra_quoting) cmin(num_files - 1, max_num_quot_ext) else 0
    num_quot_def <- cmax(num_files - 1 - num_quot_ext, 0)
    # 2^32 - 2 is the maximum representable file size. (Not 2^32 - 1, because Go archive/zip formerly (https://github.com/golang/go/issues/14185) and yauzl 2.10.0 still (https://github.com/thejoshwolfe/yauzl/blob/2.10.0/index.js#L333-L348) takes it to mean Zip64 headers must be present.)
    # lfh*(num_quot_def) is the file size increase from quoting num_quot_def local file headers.
    # sum_filename_lengths(num_files) - sum_filename_lengths(num_files-num_quot_def) is the file size increase from DEFLATE quoting of filenames.
    max_uncompressed_size <- 2^32 - 2 - (lfh*num_quot_def + sum_filename_lengths(num_files) - sum_filename_lengths(num_files-num_quot_def))
    # The compression ratio is not monotonic in max_uncompressed_size. Omitting one
    # pair of 0 bits decreases the zipped size by 258*65535 ≈ 17 MB, but it is
    # worth it if omitting those bits saves one byte in the DEFLATE suffix.
    # So try our absolute maximum limit minus 0, 258, 516, 774.
    max_uncompressed_size_opt <- argmax_fn(seq(max_uncompressed_size, max_uncompressed_size-1032, -258), function(max_uncompressed_size) {
        QUOTED_DEFLATE_unzipped_size_given_max_uncompressed_size(max_uncompressed_size, num_files, extra_quoting) / QUOTED_DEFLATE_zipped_size_given_max_uncompressed_size(max_uncompressed_size, num_files, extra_quoting)
    })
    list(max_uncompressed_size=max_uncompressed_size_opt, num_files=num_files)
}
# Quoted-overlap DEFLATE with Zip64 headers. An exhaustive sweep over all
# feasible file counts would be too slow at Zip64 scales, so we search only
# a window of ±max(5%, 10) around half the maximum feasible count —
# presumably where the optimum is known to lie for this construction
# (NOTE(review): the window choice is unexplained here; confirm against the
# accompanying paper/optimize.R before narrowing it further).
QUOTED_DEFLATE_64_optimize_for_zipped_size <- function(zipped_size, extra_quoting=FALSE) {
    max_num_files <- (zipped_size-eocd_zip64)/(lfh_zip64+cdh_zip64+quot_ext)
    low <- max(0, floor(max_num_files/2 - max(max_num_files * 0.05, 10)))
    high <- min(max_num_files, floor(max_num_files/2 + max(max_num_files * 0.05, 10)))
    num_files_opt <- argmax_fn(low:high, function(num_files) {
        compressed_size <- zipped_size - QUOTED_64_headers_size(num_files, extra_quoting)
        QUOTED_DEFLATE_64_unzipped_size_given_compressed_size(compressed_size, num_files, extra_quoting) / QUOTED_64_zipped_size_given_compressed_size(compressed_size, num_files, extra_quoting)
    })
    compressed_size_opt <- zipped_size - QUOTED_64_headers_size(num_files_opt, extra_quoting)
    list(compressed_size=compressed_size_opt, num_files=num_files_opt)
}
# Quoted-overlap bzip2: optimize file count for a maximum zipped size.
# extra_quoting=TRUE is mandatory for all bzip2 quoted constructions.
QUOTED_BZIP2_optimize_for_max_zipped_size <- function(max_zipped_size, extra_quoting=FALSE) {
    if (!extra_quoting)
        stop()
    num_files_opt <- argmax_fn(1:(max_zipped_size/(lfh+cdh+quot_ext)), function(num_files) {
        compressed_size <- BZIP2_compressed_size_given_max_compressed_size(max_zipped_size - QUOTED_headers_size(num_files, extra_quoting))
        QUOTED_BZIP2_unzipped_size_given_compressed_size(compressed_size, num_files, extra_quoting) / QUOTED_zipped_size_given_compressed_size(compressed_size, num_files, extra_quoting)
    })
    compressed_size_opt <- BZIP2_compressed_size_given_max_compressed_size(max_zipped_size - QUOTED_headers_size(num_files_opt, extra_quoting))
    list(compressed_size=compressed_size_opt, num_files=num_files_opt)
}
# Given a fixed file count, the best per-file cap is simply the maximum
# representable size: bzip2 incurs no quoting overhead inside the kernel.
QUOTED_BZIP2_optimize_for_num_files <- function(num_files, extra_quoting=TRUE) {
    if (!extra_quoting)
        stop()
    # bzip2 allows only extra-field quoting, not DEFLATE quoting, so there is no file size expansion.
    list(max_uncompressed_size=2^32 - 2, num_files=num_files)
}
# Same as QUOTED_BZIP2_optimize_for_max_zipped_size but with Zip64 headers.
QUOTED_BZIP2_64_optimize_for_max_zipped_size <- function(max_zipped_size, extra_quoting=FALSE) {
    if (!extra_quoting)
        stop()
    num_files_opt <- argmax_fn(1:(max_zipped_size/(lfh_zip64+cdh_zip64+quot_ext)), function(num_files) {
        compressed_size <- BZIP2_compressed_size_given_max_compressed_size(max_zipped_size - QUOTED_64_headers_size(num_files, extra_quoting))
        QUOTED_BZIP2_64_unzipped_size_given_compressed_size(compressed_size, num_files, extra_quoting) / QUOTED_64_zipped_size_given_compressed_size(compressed_size, num_files, extra_quoting)
    })
    compressed_size_opt <- BZIP2_compressed_size_given_max_compressed_size(max_zipped_size - QUOTED_64_headers_size(num_files_opt, extra_quoting))
    list(compressed_size=compressed_size_opt, num_files=num_files_opt)
}
# output
# Predicates deciding whether a construction exceeds the classic (non-Zip64)
# ZIP limits: 0xfffe files or 0xfffffffe bytes per file (one less than the
# 16-/32-bit maxima, to avoid parsers that treat the all-ones value as a
# Zip64 marker — see the note in QUOTED_DEFLATE_optimize_for_num_files).
FULL_DEFLATE_needs_zip64 <- function(compressed_size, num_files) {
    (num_files > 0xfffe) | (DEFLATE_uncompressed_size_given_compressed_size(compressed_size) > 0xfffffffe)
}
FULL_BZIP2_needs_zip64 <- function(compressed_size, num_files) {
    (num_files > 0xfffe) | (BZIP2_uncompressed_size_given_compressed_size(compressed_size) > 0xfffffffe)
}
# Size of the largest single file in the quoted DEFLATE construction: the
# kernel's expansion plus the quoted local file headers and filenames that
# the biggest file also contains.
QUOTED_DEFLATE_largest_file <- function(compressed_size, num_files, extra_quoting=FALSE) {
    num_quot_ext <- if (extra_quoting) cmin(num_files - 1, max_num_quot_ext) else 0
    num_quot_def <- cmax(num_files - 1 - num_quot_ext, 0)
    DEFLATE_uncompressed_size_given_compressed_size(compressed_size) + lfh*num_quot_def + sum_filename_lengths(num_files) - sum_filename_lengths(num_files-num_quot_def)
}
QUOTED_DEFLATE_needs_zip64 <- function(compressed_size, num_files, extra_quoting=FALSE) {
    (num_files > 0xfffe) | (QUOTED_DEFLATE_largest_file(compressed_size, num_files, extra_quoting) > 0xfffffffe)
}
QUOTED_BZIP2_largest_file <- function(compressed_size, num_files, extra_quoting=FALSE) {
    if (!extra_quoting)
        stop()
    # no file size expansion with bzip2, because of only extra-field quoting
    BZIP2_uncompressed_size_given_compressed_size(compressed_size)
}
# NOTE(review): unlike the DEFLATE variant, this does not also check
# num_files > 0xfffe — confirm whether that is intentional (the bzip2
# construction may be limited to max_num_quot_ext files elsewhere).
QUOTED_BZIP2_needs_zip64 <- function(compressed_size, num_files, extra_quoting=FALSE) {
    QUOTED_BZIP2_largest_file(compressed_size, num_files, extra_quoting) > 0xfffffffe
}
options(stringsAsFactors=FALSE, width=120)
# Accumulator for all plotted data points; written out as CSV at the end.
data <- data.frame(class=c(), zipped_size=c(), unzipped_size=c(), label=c())
# Append one (class, zipped_size, unzipped_size, label) row to the global
# `data` frame. Grows the frame via rbind + `<<-`, which is quadratic in
# general but fine for the few hundred points this script records.
record <- function(class, zipped_size, unzipped_size, label="") {
    # print(c(class, zipped_size, unzipped_size, label))
    data <<- rbind(data, list(class=class, zipped_size=zipped_size, unzipped_size=unzipped_size, label=label))
}
# Geometric sequence of sample points: floor(low), floor(low*m),
# floor(low*m^2), ... for every term strictly below high, with high itself
# always appended as the final element.
geom_seq <- function(low, high, m) {
    terms <- c()
    cur <- low
    while (cur < high) {
        terms <- c(terms, floor(cur))
        cur <- cur * m
    }
    c(terms, high)
}
# geometric spacing up to about 2 GB.
points <- floor(250*1.5^(0:39))
# none_deflate
# No-overlap DEFLATE: one maximal file body (just under the 32-bit
# uncompressed limit) repeated; sweep file counts up to the 16-bit maximum.
uncompressed_size <- DEFLATE_uncompressed_size_given_max_uncompressed_size(0xfffffffe)
compressed_size <- DEFLATE_compressed_size_given_uncompressed_size(uncompressed_size)
for (num_files in c(2^(0:15), 65534)) {
    zipped_size <- NONE_zipped_size_given_compressed_size(compressed_size, num_files)
    unzipped_size <- NONE_DEFLATE_unzipped_size_given_compressed_size(compressed_size, num_files)
    record("none_deflate", zipped_size, unzipped_size)
}
# Single file, kernel growing geometrically (x1.5) until the uncompressed
# size exceeds the 32-bit limit.
compressed_size <- 250
while (TRUE) {
    zipped_size <- NONE_zipped_size_given_compressed_size(compressed_size, 1)
    unzipped_size <- NONE_DEFLATE_unzipped_size_given_compressed_size(compressed_size, 1)
    if (unzipped_size > 0xfffffffe)
        break
    record("none_deflate", zipped_size, unzipped_size)
    compressed_size <- floor(compressed_size * 1.5)
}
# Same, with Zip64 headers, up to an arbitrary 2^62 cutoff.
compressed_size <- 250
while (TRUE) {
    zipped_size <- NONE_64_zipped_size_given_compressed_size(compressed_size, 1)
    unzipped_size <- NONE_DEFLATE_unzipped_size_given_compressed_size(compressed_size, 1)
    if (unzipped_size > 2^62)
        break
    record("none_deflate_zip64", zipped_size, unzipped_size)
    compressed_size <- floor(compressed_size * 1.5)
}
# none_bzip2
# Same three sweeps for the no-overlap bzip2 construction; kernel sizes are
# rounded to valid bzip2 stream sizes first.
uncompressed_size <- BZIP2_uncompressed_size_given_max_uncompressed_size(0xfffffffe)
compressed_size <- BZIP2_compressed_size_given_uncompressed_size(uncompressed_size)
for (num_files in c(2^(0:15), 65534)) {
    zipped_size <- NONE_zipped_size_given_compressed_size(compressed_size, num_files)
    unzipped_size <- NONE_BZIP2_unzipped_size_given_compressed_size(compressed_size, num_files)
    record("none_bzip2", zipped_size, unzipped_size)
}
max_compressed_size <- 250
while (TRUE) {
    compressed_size <- BZIP2_compressed_size_given_max_compressed_size(max_compressed_size)
    zipped_size <- NONE_zipped_size_given_compressed_size(compressed_size, 1)
    unzipped_size <- NONE_BZIP2_unzipped_size_given_compressed_size(compressed_size, 1)
    if (unzipped_size > 0xfffffffe)
        break
    record("none_bzip2", zipped_size, unzipped_size)
    max_compressed_size <- floor(max_compressed_size * 1.5)
}
max_compressed_size <- 250
while (TRUE) {
    compressed_size <- BZIP2_compressed_size_given_max_compressed_size(max_compressed_size)
    zipped_size <- NONE_64_zipped_size_given_compressed_size(compressed_size, 1)
    unzipped_size <- NONE_BZIP2_unzipped_size_given_compressed_size(compressed_size, 1)
    if (unzipped_size > 2^62)
        break
    record("none_bzip2_zip64", zipped_size, unzipped_size)
    max_compressed_size <- floor(max_compressed_size * 1.5)
}
# full_deflate
# Full-overlap DEFLATE: optimize at each target zipped size; stop the
# classic sweep once the optimum would need Zip64, then redo with Zip64.
for (zipped_size in points) {
    params <- FULL_DEFLATE_optimize_for_zipped_size(zipped_size)
    # Sanity check: headers + kernel must reproduce the target exactly.
    if (zipped_size != FULL_zipped_size_given_compressed_size(params$compressed_size, params$num_files))
        stop("unexpected")
    if (FULL_DEFLATE_needs_zip64(params$compressed_size, params$num_files))
        break
    unzipped_size <- FULL_DEFLATE_unzipped_size_given_compressed_size(params$compressed_size, params$num_files)
    record("full_deflate", zipped_size, unzipped_size)
}
for (zipped_size in points) {
    params <- FULL_DEFLATE_64_optimize_for_zipped_size(zipped_size)
    if (zipped_size != FULL_64_zipped_size_given_compressed_size(params$compressed_size, params$num_files))
        stop("unexpected")
    unzipped_size <- FULL_DEFLATE_unzipped_size_given_compressed_size(params$compressed_size, params$num_files)
    record("full_deflate_zip64", zipped_size, unzipped_size)
}
# full_bzip2
# bzip2 kernels are quantized, so only a maximum zipped size is targeted and
# no exact-size sanity check is possible.
for (max_zipped_size in points) {
    params <- FULL_BZIP2_optimize_for_max_zipped_size(max_zipped_size)
    if (FULL_BZIP2_needs_zip64(params$compressed_size, params$num_files))
        break
    zipped_size <- FULL_zipped_size_given_compressed_size(params$compressed_size, params$num_files)
    unzipped_size <- FULL_BZIP2_unzipped_size_given_compressed_size(params$compressed_size, params$num_files)
    record("full_bzip2", zipped_size, unzipped_size)
}
for (max_zipped_size in points) {
    params <- FULL_BZIP2_64_optimize_for_max_zipped_size(max_zipped_size)
    zipped_size <- FULL_zipped_size_given_compressed_size(params$compressed_size, params$num_files)
    unzipped_size <- FULL_BZIP2_unzipped_size_given_compressed_size(params$compressed_size, params$num_files)
    record("full_bzip2_zip64", zipped_size, unzipped_size)
}
# quoted_deflate
for (zipped_size in points) {
    params <- QUOTED_DEFLATE_optimize_for_zipped_size(zipped_size)
    if (zipped_size != QUOTED_zipped_size_given_compressed_size(params$compressed_size, params$num_files))
        stop("unexpected")
    if (QUOTED_DEFLATE_needs_zip64(params$compressed_size, params$num_files))
        break
    unzipped_size <- QUOTED_DEFLATE_unzipped_size_given_compressed_size(params$compressed_size, params$num_files)
    record("quoted_deflate", zipped_size, unzipped_size)
}
# 8319377 is smallest zipped_size that requires Zip64, from optimize.R.
# Add one extra data point right at the classic-format boundary; with() is
# used only to scope the temporary `params`.
with(list(params=QUOTED_DEFLATE_optimize_for_zipped_size(8319377-1)), {
    if (QUOTED_DEFLATE_needs_zip64(params$compressed_size, params$num_files))
        stop("unexpected")
    record(
        "quoted_deflate",
        QUOTED_zipped_size_given_compressed_size(params$compressed_size, params$num_files),
        QUOTED_DEFLATE_unzipped_size_given_compressed_size(params$compressed_size, params$num_files)
    )
})
for (zipped_size in points) {
    params <- QUOTED_DEFLATE_64_optimize_for_zipped_size(zipped_size)
    if (zipped_size != QUOTED_64_zipped_size_given_compressed_size(params$compressed_size, params$num_files))
        stop("unexpected")
    unzipped_size <- QUOTED_DEFLATE_64_unzipped_size_given_compressed_size(params$compressed_size, params$num_files)
    record("quoted_deflate_zip64", zipped_size, unzipped_size)
}
# quoted_deflate_extra
# Same sweeps with extra-field quoting enabled.
for (zipped_size in points) {
    params <- QUOTED_DEFLATE_optimize_for_zipped_size(zipped_size, extra_quoting=TRUE)
    if (zipped_size != QUOTED_zipped_size_given_compressed_size(params$compressed_size, params$num_files, extra_quoting=TRUE))
        stop("unexpected")
    if (QUOTED_DEFLATE_needs_zip64(params$compressed_size, params$num_files, extra_quoting=TRUE))
        break
    unzipped_size <- QUOTED_DEFLATE_unzipped_size_given_compressed_size(params$compressed_size, params$num_files, extra_quoting=TRUE)
    record("quoted_deflate_extra", zipped_size, unzipped_size)
}
# 8317713 is smallest zipped_size that requires Zip64, from optimize.R.
# NOTE(review): unlike the loop above, this boundary point omits
# extra_quoting=TRUE in all three calls — confirm whether that is deliberate.
with(list(params=QUOTED_DEFLATE_optimize_for_zipped_size(8317713-1)), {
    if (QUOTED_DEFLATE_needs_zip64(params$compressed_size, params$num_files))
        stop("unexpected")
    record(
        "quoted_deflate_extra",
        QUOTED_zipped_size_given_compressed_size(params$compressed_size, params$num_files),
        QUOTED_DEFLATE_unzipped_size_given_compressed_size(params$compressed_size, params$num_files)
    )
})
for (zipped_size in points) {
    params <- QUOTED_DEFLATE_64_optimize_for_zipped_size(zipped_size, extra_quoting=TRUE)
    if (zipped_size != QUOTED_64_zipped_size_given_compressed_size(params$compressed_size, params$num_files, extra_quoting=TRUE))
        stop("unexpected")
    unzipped_size <- QUOTED_DEFLATE_64_unzipped_size_given_compressed_size(params$compressed_size, params$num_files, extra_quoting=TRUE)
    record("quoted_deflate_zip64_extra", zipped_size, unzipped_size)
}
# quoted_bzip2_extra
for (max_zipped_size in points) {
    params <- QUOTED_BZIP2_optimize_for_max_zipped_size(max_zipped_size, extra_quoting=TRUE)
    # Stop when the file count exceeds what extra-field quoting can hold, or
    # when Zip64 becomes necessary.
    if (params$num_files > max_num_quot_ext || QUOTED_BZIP2_needs_zip64(params$compressed_size, params$num_files, extra_quoting=TRUE))
        break
    zipped_size <- QUOTED_zipped_size_given_compressed_size(params$compressed_size, params$num_files, extra_quoting=TRUE)
    unzipped_size <- QUOTED_BZIP2_unzipped_size_given_compressed_size(params$compressed_size, params$num_files, extra_quoting=TRUE)
    record("quoted_bzip2_extra", zipped_size, unzipped_size)
}
# 5828 is smallest zipped_size that requires Zip64, from optimize.R.
with(list(params=QUOTED_BZIP2_optimize_for_max_zipped_size(5828-1, extra_quoting=TRUE)), {
    if (QUOTED_BZIP2_needs_zip64(params$compressed_size, params$num_files, extra_quoting=TRUE))
        stop("unexpected")
    record(
        "quoted_bzip2_extra",
        QUOTED_zipped_size_given_compressed_size(params$compressed_size, params$num_files, extra_quoting=TRUE),
        QUOTED_BZIP2_unzipped_size_given_compressed_size(params$compressed_size, params$num_files, extra_quoting=TRUE)
    )
})
# Extend the curve by file count (kernel already maximal per file).
for (num_files in geom_seq(36, max_num_quot_ext+1, 1.5)) {
    params <- QUOTED_BZIP2_optimize_for_num_files(num_files, extra_quoting=TRUE)
    zipped_size <- QUOTED_BZIP2_zipped_size_given_max_uncompressed_size(params$max_uncompressed_size, params$num_files, extra_quoting=TRUE)
    unzipped_size <- QUOTED_BZIP2_unzipped_size_given_max_uncompressed_size(params$max_uncompressed_size, params$num_files, extra_quoting=TRUE)
    record("quoted_bzip2_extra", zipped_size, unzipped_size)
}
for (max_zipped_size in points) {
    params <- QUOTED_BZIP2_64_optimize_for_max_zipped_size(max_zipped_size, extra_quoting=TRUE)
    if (params$num_files > max_num_quot_ext_zip64+1)
        break
    zipped_size <- QUOTED_64_zipped_size_given_compressed_size(params$compressed_size, params$num_files, extra_quoting=TRUE)
    unzipped_size <- QUOTED_BZIP2_64_unzipped_size_given_compressed_size(params$compressed_size, params$num_files, extra_quoting=TRUE)
    record("quoted_bzip2_zip64_extra", zipped_size, unzipped_size)
}
# NOTE(review): relies on `params` leaking out of the for loop above (its
# value from the iteration that triggered break) — fragile; consider naming
# it explicitly before this loop.
compressed_size <- params$compressed_size
while (TRUE) {
    zipped_size <- QUOTED_64_zipped_size_given_compressed_size(compressed_size, max_num_quot_ext_zip64+1, extra_quoting=TRUE)
    unzipped_size <- QUOTED_BZIP2_64_unzipped_size_given_compressed_size(compressed_size, max_num_quot_ext_zip64+1, extra_quoting=TRUE)
    if (unzipped_size > 2^62)
        break
    record("quoted_bzip2_zip64_extra", zipped_size, unzipped_size)
    compressed_size <- floor(compressed_size * 1.5)
}
# specific examplars we want represented exactly
# Hard-coded sizes of the classic 42.zip (non-recursive and fully recursive
# expansion), plus recomputed points for the paper's published archives.
record("42_nonrec", 42374, 558432, "42.zip")
record("42_rec", 42374, 4507981343026016, "42.zip")
with(list(params=FULL_DEFLATE_optimize_for_zipped_size(42374)), record(
    "full_deflate",
    FULL_zipped_size_given_compressed_size(params$compressed_size, params$num_files),
    FULL_DEFLATE_unzipped_size_given_compressed_size(params$compressed_size, params$num_files),
    "overlap.zip"
))
with(list(params=QUOTED_DEFLATE_optimize_for_zipped_size(42374)), record(
    "quoted_deflate",
    QUOTED_zipped_size_given_compressed_size(params$compressed_size, params$num_files),
    QUOTED_DEFLATE_unzipped_size_given_compressed_size(params$compressed_size, params$num_files),
    "zbsm.zip"
))
with(list(params=QUOTED_DEFLATE_optimize_for_num_files(65534)), record(
    "quoted_deflate",
    QUOTED_DEFLATE_zipped_size_given_max_uncompressed_size(params$max_uncompressed_size, params$num_files),
    QUOTED_DEFLATE_unzipped_size_given_max_uncompressed_size(params$max_uncompressed_size, params$num_files),
    "zblg.zip"
))
with(list(params=QUOTED_DEFLATE_64_optimize_for_zipped_size(45876952)), record(
    "quoted_deflate_zip64",
    QUOTED_64_zipped_size_given_compressed_size(params$compressed_size, params$num_files),
    QUOTED_DEFLATE_64_unzipped_size_given_compressed_size(params$compressed_size, params$num_files),
    "zbxl.zip"
))
with(list(params=QUOTED_BZIP2_optimize_for_num_files(1809, extra_quoting=TRUE)), record(
    "quoted_bzip2_extra",
    QUOTED_BZIP2_zipped_size_given_max_uncompressed_size(params$max_uncompressed_size, params$num_files, extra_quoting=TRUE),
    QUOTED_BZIP2_unzipped_size_given_max_uncompressed_size(params$max_uncompressed_size, params$num_files, extra_quoting=TRUE),
    "zbbz2.zip"
))
# with(list(params=QUOTED_DEFLATE_64_optimize_for_zipped_size(2961656712)), record(
#     "quoted_deflate_zip64",
#     QUOTED_64_zipped_size_given_compressed_size(params$compressed_size, params$num_files),
#     QUOTED_DEFLATE_64_unzipped_size_given_compressed_size(params$compressed_size, params$num_files),
#     "zbxxl.zip"
# ))
# Drop degenerate points, then emit the CSV on stdout.
data <- data[data$zipped_size > 0 & data$unzipped_size > 0, ]
write.csv(data, row.names=FALSE)
| /data/zipped_size.R | no_license | ZerosunGitHub/zipbomb-paper | R | false | false | 32,877 | r | B <- 36 # size of filename alphabet
# Closed-form formula for sum(nchar(filename_for_index(0..(n-1)))), the sum of
# the first n filenames generated by filename_for_index.
#
# First, pretend like filename_for_index generates a zero-length
# filename at index 0, and shift the other indices by 1. This doesn't
# affect the sum of lengths, and makes the formula more regular. Now, of
# the first n filenames, all but 1 are at least 1 byte long, all but
# 36+1 are at least 2 bytes long, all but 36²+36+1 are at least 3 bytes
# long, and so on. In general, for a length i, all but 36^(i-1)+...+1 =
# (36^i-1)/35 (a base-36 repunit) filenames are at least i bytes long.
# The greatest value of i that does not exceed the length of the nth
# filename is
# d = floor(log_36(n/(36/35))) + 1
# For each value of i, we add 1 byte for each filename that is at least
# i bytes long.
# Σ_i=1…d n - (36^i-1)/35
# = dn - Σ_i=1…d (36^i-1)/35
# = dn - ((36^d-1)*36/35² - d/35)
# where the last equality comes from adapting a formula for the sum of
# base-10 repunits: https://oeis.org/A014824.
sum_filename_lengths <- function(n) {
n <- n + 1 # Shift by 1 index for a fictitious zero-length filename.
B1 <- B - 1
# Length of the longest base-B repunit not greater than n
d <- floor(log(n/(B/B1), B)) + 1
d*n - ((B^d-1)/B1 * B/B1 - d/B1)
}
# Calculates sum(nchar(filename_for_index(i)) * i) for i in 0..(n-1), but in a
# faster way (O(log n)) than just doing the sum. Vectorize() lets callers
# pass a vector of n values even though the body is scalar.
triangular_sum_filename_lengths <- Vectorize(function(n) {
    s <- 0
    m <- 1
    r <- -1
    # s is the running sum. m is the current filename length (starts at 1).
    # r keeps track of the starting point for weighting the summation in
    # each iteration; it starts at -1, not 0, because we are weighting the
    # filenames by 0:(n-1), not 1:n.
    while (n > 0) {
        # x is the number of filenames of length m: either B^m, or
        # however many remain.
        x <- min(n, B^m)
        # x * (x+1)/2 is 1 + 2 + ... + x.
        # x * ((x+1)/2 + r) is (r+1) + (r+2) + ... + (r+x).
        s <- s + m * x * ((x + 1) / 2 + r)
        # We've handled all x filenames of length m.
        n <- n - x
        # Next iteration, offset weights by the number of filenames we
        # have just processed.
        r <- r + x
        # Next filename length.
        m <- m + 1
    }
    s
})
# Return the element of `range` at which the (vectorized) function `f`
# attains its maximum. Ties resolve to the first maximum, per which.max.
argmax_fn <- function(range, f) {
    scores <- f(range)
    range[[which.max(scores)]]
}
# Find the smallest integer in [low, high] for which predicate f returns
# TRUE, or NA if there is none. f must be monotone: FALSE below some
# threshold and TRUE at and above it. high may be NA, meaning "no known
# upper bound": the search first doubles low (galloping phase) until f
# succeeds, which establishes a finite high; if f never succeeds in that
# case the loop does not terminate.
bsearch_fn <- function(low, high, f) {
    low <- floor(low)
    # Bug fix: this was `hight <- ceiling(high)`, a dead store to a
    # misspelled variable, so a non-integer `high` was never rounded up.
    high <- ceiling(high)
    while (is.na(high) || low < high) {
        if (is.na(high)) {
            # Galloping phase: no upper bound established yet.
            mid <- low * 2
        } else {
            mid <- floor((low + high) / 2)
        }
        if (!f(mid)) {
            low <- mid + 1
        } else {
            high <- mid
        }
    }
    # Scalar condition, so plain if/else rather than ifelse().
    if (f(low)) low else NA
}
# Elementwise min and max of each element of a vector with a scalar bound.
# Uses the vectorized pmin/pmax instead of a per-element sapply loop: faster,
# and correctly returns a zero-length vector (not list()) for empty input.
cmin <- function(v, bound) {
    pmin(v, bound)
}
cmax <- function(v, bound) {
    pmax(v, bound)
}
# Fixed sizes (bytes) of ZIP structures, excluding variable-length filenames.
lfh <- 30 # size of a local file header
cdh <- 46 # size of a central directory header
eocd <- 22 # size of an end of central directory record
quot_ext <- 4 # size of a an extra field tag/length header (for quoting)
quot_def <- 5 # size of a DEFLATE non-compressed block header (for quoting)
# Zip64 variants carry extended-information extra fields.
lfh_zip64 <- lfh+20
cdh_zip64 <- cdh+12
eocd_zip64 <- eocd+56+20 # Zip64 end of central directory record and end of central directory locator
# See how many files we can quote using extra fields rather than DEFLATE
# non-compressed blocks. An extra field's length is a 16-bit quantity, so
# the quoted headers + filenames must fit in 65535 bytes; binary-search the
# largest n that still fits (bsearch_fn finds the first overflow, minus 1).
max_num_quot_ext <- bsearch_fn(1, NA, function(n) {
    n*lfh + (n-1)*quot_ext + sum_filename_lengths(n) > 65535
}) - 1
max_num_quot_ext_zip64 <- bsearch_fn(1, NA, function(n) {
    n*lfh_zip64 + (n-1)*quot_ext + sum_filename_lengths(n) > 65535
}) - 1
# Conversions between compressed and uncompressed sizes for the two kernel
# compressors. 1032 is DEFLATE's maximum expansion ratio (1032:1); the bzip2
# constants 45899235 and 32 are the output/input sizes of the repeated
# maximum-expansion block produced by the companion bulk compressor —
# NOTE(review): confirm against bulk_deflate/bulk_bzip2 in this repo.
DEFLATE_uncompressed_size_given_compressed_size <- function(compressed_size) {
    # This relies on specific knowledge of how bulk_deflate works,
    # specifically that the prefix and suffix are together 16 bytes long
    # and automatically represent 1033 uncompressed bytes by themselves
    # (1+258+258 in the prefix and 258+258 in the suffix).
    1033 + (compressed_size-16) * 1032
}
DEFLATE_compressed_size_given_uncompressed_size <- function(uncompressed_size) {
    # Account for prefix and suffix.
    15 + 1 + ceiling((uncompressed_size-1-1032) / 1032)
}
DEFLATE_uncompressed_size_given_max_uncompressed_size <- function(max_uncompressed_size) {
    # bulk_deflate will get within 258 of max_uncompressed_size (accounting
    # for the 1 literal byte at the beginning).
    max_uncompressed_size - ((max_uncompressed_size-1) %% 258)
}
# bzip2 streams: 4-byte header, 10-byte footer, body in 32-byte units each
# expanding to 45899235 bytes.
BZIP2_uncompressed_size_given_compressed_size <- function(compressed_size) {
    (compressed_size-4-10) * 45899235 / 32
}
BZIP2_compressed_size_given_max_compressed_size <- function(max_compressed_size) {
    4 + ((max_compressed_size-4-10) %/% 32) * 32 + 10
}
BZIP2_uncompressed_size_given_max_uncompressed_size <- function(max_compressed_size) {
    (max_compressed_size %/% 45899235) * 45899235
}
BZIP2_compressed_size_given_uncompressed_size <- function(uncompressed_size) {
    uncompressed_size * 32 / 45899235 + 4 + 10
}
BZIP2_compressed_size_given_max_uncompressed_size <- function(max_uncompressed_size) {
    4 + (max_uncompressed_size %/% 45899235) * 32 + 10
}
## no overlap construction
# Baseline: every file has its own copy of the kernel; nothing is shared.
# Filenames appear twice (local file header + central directory header).
NONE_headers_size <- function(num_files) {
    num_files*(lfh+cdh) + 2*sum_filename_lengths(num_files) + eocd
}
NONE_zipped_size_given_compressed_size <- function(compressed_size, num_files) {
    compressed_size*num_files + NONE_headers_size(num_files)
}
NONE_unzipped_size_given_uncompressed_size <- function(uncompressed_size, num_files) {
    num_files*uncompressed_size
}
NONE_DEFLATE_unzipped_size_given_compressed_size <- function(compressed_size, num_files) {
    uncompressed_size <- DEFLATE_uncompressed_size_given_compressed_size(compressed_size)
    NONE_unzipped_size_given_uncompressed_size(uncompressed_size, num_files)
}
NONE_BZIP2_unzipped_size_given_compressed_size <- function(compressed_size, num_files) {
    uncompressed_size <- BZIP2_uncompressed_size_given_compressed_size(compressed_size)
    NONE_unzipped_size_given_uncompressed_size(uncompressed_size, num_files)
}
# Zip64 variants of the header accounting.
NONE_64_headers_size <- function(num_files) {
    num_files*(lfh_zip64+cdh_zip64) + 2*sum_filename_lengths(num_files) + eocd_zip64
}
NONE_64_zipped_size_given_compressed_size <- function(compressed_size, num_files) {
    compressed_size*num_files + NONE_64_headers_size(num_files)
}
## full overlap construction
# All central directory headers point at a single local file header and
# kernel, so only one lfh and one (first) filename are stored besides the
# per-file central directory entries.
FULL_headers_size <- function(num_files) {
    lfh + num_files*cdh + sum_filename_lengths(1) + sum_filename_lengths(num_files) + eocd
}
FULL_zipped_size_given_compressed_size <- function(compressed_size, num_files) {
    compressed_size + FULL_headers_size(num_files)
}
FULL_unzipped_size_given_uncompressed_size <- function(uncompressed_size, num_files) {
    num_files*uncompressed_size
}
FULL_DEFLATE_unzipped_size_given_compressed_size <- function(compressed_size, num_files) {
    uncompressed_size <- DEFLATE_uncompressed_size_given_compressed_size(compressed_size)
    FULL_unzipped_size_given_uncompressed_size(uncompressed_size, num_files)
}
FULL_BZIP2_unzipped_size_given_compressed_size <- function(compressed_size, num_files) {
    uncompressed_size <- BZIP2_uncompressed_size_given_compressed_size(compressed_size)
    FULL_unzipped_size_given_uncompressed_size(uncompressed_size, num_files)
}
# Zip64 variants of the header accounting.
FULL_64_headers_size <- function(num_files) {
    lfh_zip64 + num_files*cdh_zip64 + sum_filename_lengths(1) + sum_filename_lengths(num_files) + eocd_zip64
}
FULL_64_zipped_size_given_compressed_size <- function(compressed_size, num_files) {
    compressed_size + FULL_64_headers_size(num_files)
}
## quoted overlap construction
# Files overlap, and each later file "quotes" the next local file header so
# parsers that insist on consistent offsets still accept the archive. Up to
# max_num_quot_ext headers can be quoted in extra fields (quot_ext bytes
# each); the rest need a DEFLATE non-compressed block (quot_def bytes each).
QUOTED_headers_size <- function(num_files, extra_quoting) {
    num_quot_ext <- if (extra_quoting) cmin(num_files - 1, max_num_quot_ext) else 0
    num_quot_def <- cmax(num_files - 1 - num_quot_ext, 0)
    num_files*(lfh+cdh) + num_quot_ext*quot_ext + num_quot_def*quot_def + 2*sum_filename_lengths(num_files) + eocd
}
QUOTED_zipped_size_given_compressed_size <- function(compressed_size, num_files, extra_quoting=FALSE) {
    compressed_size + QUOTED_headers_size(num_files, extra_quoting)
}
# Total extracted size: every file yields the kernel, and DEFLATE-quoted
# files additionally contain the quoted headers/filenames of the files after
# them (hence the triangular terms).
QUOTED_unzipped_size_given_uncompressed_size <- function(uncompressed_size, num_files, extra_quoting) {
    num_quot_ext <- if (extra_quoting) cmin(num_files - 1, max_num_quot_ext) else 0
    num_quot_def <- cmax(num_files - 1 - num_quot_ext, 0)
    num_files*uncompressed_size + num_quot_ext*num_quot_def*lfh + num_quot_def*(num_quot_def+1)/2*lfh + triangular_sum_filename_lengths(num_files) - triangular_sum_filename_lengths(num_quot_ext+1)
}
QUOTED_DEFLATE_unzipped_size_given_compressed_size <- function(compressed_size, num_files, extra_quoting=FALSE) {
    uncompressed_size <- DEFLATE_uncompressed_size_given_compressed_size(compressed_size)
    QUOTED_unzipped_size_given_uncompressed_size(uncompressed_size, num_files, extra_quoting)
}
QUOTED_DEFLATE_zipped_size_given_max_uncompressed_size <- function(max_uncompressed_size, num_files, extra_quoting=FALSE) {
    uncompressed_size <- DEFLATE_uncompressed_size_given_max_uncompressed_size(max_uncompressed_size)
    compressed_size <- DEFLATE_compressed_size_given_uncompressed_size(uncompressed_size)
    QUOTED_zipped_size_given_compressed_size(compressed_size, num_files, extra_quoting)
}
QUOTED_DEFLATE_unzipped_size_given_max_uncompressed_size <- function(max_uncompressed_size, num_files, extra_quoting=FALSE) {
    uncompressed_size <- DEFLATE_uncompressed_size_given_max_uncompressed_size(max_uncompressed_size)
    QUOTED_unzipped_size_given_uncompressed_size(uncompressed_size, num_files, extra_quoting)
}
# bzip2 cannot DEFLATE-quote, so extra_quoting=TRUE is mandatory.
QUOTED_BZIP2_unzipped_size_given_compressed_size <- function(compressed_size, num_files, extra_quoting=FALSE) {
    if (!extra_quoting)
        stop()
    uncompressed_size <- BZIP2_uncompressed_size_given_compressed_size(compressed_size)
    QUOTED_unzipped_size_given_uncompressed_size(uncompressed_size, num_files, extra_quoting)
}
# Zip64 variants of the quoted-overlap accounting; identical structure to
# the classic versions above but with Zip64 header sizes and the Zip64
# extra-field quoting limit.
QUOTED_64_headers_size <- function(num_files, extra_quoting) {
    num_quot_ext <- if (extra_quoting) cmin(num_files - 1, max_num_quot_ext_zip64) else 0
    num_quot_def <- cmax(num_files - 1 - num_quot_ext, 0)
    num_files*(lfh_zip64+cdh_zip64) + num_quot_ext*quot_ext + num_quot_def*quot_def + 2*sum_filename_lengths(num_files) + eocd_zip64
}
QUOTED_64_zipped_size_given_compressed_size <- function(compressed_size, num_files, extra_quoting=FALSE) {
    compressed_size + QUOTED_64_headers_size(num_files, extra_quoting)
}
QUOTED_64_unzipped_size_given_uncompressed_size <- function(uncompressed_size, num_files, extra_quoting) {
    num_quot_ext <- if (extra_quoting) cmin(num_files - 1, max_num_quot_ext_zip64) else 0
    num_quot_def <- cmax(num_files - 1 - num_quot_ext, 0)
    num_files*uncompressed_size + num_quot_ext*num_quot_def*lfh_zip64 + num_quot_def*(num_quot_def+1)/2*lfh_zip64 + triangular_sum_filename_lengths(num_files) - triangular_sum_filename_lengths(num_quot_ext+1)
}
QUOTED_DEFLATE_64_unzipped_size_given_compressed_size <- function(compressed_size, num_files, extra_quoting=FALSE) {
    uncompressed_size <- DEFLATE_uncompressed_size_given_compressed_size(compressed_size)
    QUOTED_64_unzipped_size_given_uncompressed_size(uncompressed_size, num_files, extra_quoting)
}
QUOTED_BZIP2_64_unzipped_size_given_compressed_size <- function(compressed_size, num_files, extra_quoting=FALSE) {
    if (!extra_quoting)
        stop()
    uncompressed_size <- BZIP2_uncompressed_size_given_compressed_size(compressed_size)
    QUOTED_64_unzipped_size_given_uncompressed_size(uncompressed_size, num_files, extra_quoting)
}
# bzip2 quoted sizes from a per-file uncompressed cap; extra_quoting is
# mandatory (guarded by stop()).
QUOTED_BZIP2_zipped_size_given_max_uncompressed_size <- function(max_uncompressed_size, num_files, extra_quoting=FALSE) {
    if (!extra_quoting)
        stop()
    compressed_size <- BZIP2_compressed_size_given_max_uncompressed_size(max_uncompressed_size)
    QUOTED_zipped_size_given_compressed_size(compressed_size, num_files, extra_quoting)
}
QUOTED_BZIP2_unzipped_size_given_max_uncompressed_size <- function(max_uncompressed_size, num_files, extra_quoting=FALSE) {
    if (!extra_quoting)
        stop()
    compressed_size <- BZIP2_compressed_size_given_max_uncompressed_size(max_uncompressed_size)
    QUOTED_BZIP2_unzipped_size_given_compressed_size(compressed_size, num_files, extra_quoting)
}
## optimization
FULL_DEFLATE_optimize_for_zipped_size <- function(zipped_size) {
num_files_opt <- argmax_fn(1:(zipped_size/cdh), function(num_files) {
compressed_size <- zipped_size - FULL_headers_size(num_files)
FULL_DEFLATE_unzipped_size_given_compressed_size(compressed_size, num_files) / FULL_zipped_size_given_compressed_size(compressed_size, num_files)
})
compressed_size_opt <- zipped_size - FULL_headers_size(num_files_opt)
list(compressed_size=compressed_size_opt, num_files=num_files_opt)
}
FULL_DEFLATE_64_optimize_for_zipped_size <- function(zipped_size) {
  # Zip64 variant: same exhaustive search, with Zip64-sized headers.
  ratio_for <- function(n) {
    payload <- zipped_size - FULL_64_headers_size(n)
    FULL_DEFLATE_unzipped_size_given_compressed_size(payload, n) /
      FULL_64_zipped_size_given_compressed_size(payload, n)
  }
  best_n <- argmax_fn(1:(zipped_size/cdh), ratio_for)
  list(compressed_size=zipped_size - FULL_64_headers_size(best_n), num_files=best_n)
}
FULL_BZIP2_optimize_for_max_zipped_size <- function(max_zipped_size) {
  # The zipped size is an upper bound here: the remaining budget after headers
  # is converted into an achievable bzip2 compressed size.
  ratio_for <- function(n) {
    payload <- BZIP2_compressed_size_given_max_compressed_size(max_zipped_size - FULL_headers_size(n))
    FULL_BZIP2_unzipped_size_given_compressed_size(payload, n) /
      FULL_zipped_size_given_compressed_size(payload, n)
  }
  best_n <- argmax_fn(1:(max_zipped_size/cdh), ratio_for)
  best_payload <- BZIP2_compressed_size_given_max_compressed_size(max_zipped_size - FULL_headers_size(best_n))
  list(compressed_size=best_payload, num_files=best_n)
}
FULL_BZIP2_64_optimize_for_max_zipped_size <- function(max_zipped_size) {
  # Zip64 variant of FULL_BZIP2_optimize_for_max_zipped_size.
  ratio_for <- function(n) {
    payload <- BZIP2_compressed_size_given_max_compressed_size(max_zipped_size - FULL_64_headers_size(n))
    FULL_BZIP2_unzipped_size_given_compressed_size(payload, n) /
      FULL_64_zipped_size_given_compressed_size(payload, n)
  }
  best_n <- argmax_fn(1:(max_zipped_size/cdh), ratio_for)
  best_payload <- BZIP2_compressed_size_given_max_compressed_size(max_zipped_size - FULL_64_headers_size(best_n))
  list(compressed_size=best_payload, num_files=best_n)
}
QUOTED_DEFLATE_optimize_for_zipped_size <- function(zipped_size, extra_quoting=FALSE) {
  # Each file costs a local header, a central header and a quoting extra
  # field, which bounds the feasible file count searched below.
  ratio_for <- function(n) {
    payload <- zipped_size - QUOTED_headers_size(n, extra_quoting)
    QUOTED_DEFLATE_unzipped_size_given_compressed_size(payload, n, extra_quoting) /
      QUOTED_zipped_size_given_compressed_size(payload, n, extra_quoting)
  }
  best_n <- argmax_fn(1:(zipped_size/(lfh+cdh+quot_ext)), ratio_for)
  list(compressed_size=zipped_size - QUOTED_headers_size(best_n, extra_quoting), num_files=best_n)
}
QUOTED_DEFLATE_optimize_for_num_files <- function(num_files, extra_quoting=FALSE) {
  # Split the quoted headers between extra-field quoting (capped at
  # max_num_quot_ext) and DEFLATE quoting for the remainder.
  n_ext <- if (extra_quoting) cmin(num_files - 1, max_num_quot_ext) else 0
  n_def <- cmax(num_files - 1 - n_ext, 0)
  # 2^32 - 2 is the maximum representable file size. (Not 2^32 - 1, because Go
  # archive/zip formerly (https://github.com/golang/go/issues/14185) and yauzl
  # 2.10.0 still (https://github.com/thejoshwolfe/yauzl/blob/2.10.0/index.js#L333-L348)
  # takes it to mean Zip64 headers must be present.)
  # Quoting n_def local file headers inflates the file by lfh bytes each, and
  # DEFLATE quoting of filenames inflates it further by the difference in
  # summed filename lengths.
  quoting_overhead <- lfh*n_def + sum_filename_lengths(num_files) - sum_filename_lengths(num_files-n_def)
  limit <- 2^32 - 2 - quoting_overhead
  # The compression ratio is not monotonic in the uncompressed-size limit.
  # Omitting one pair of 0 bits decreases the zipped size by 258*65535 ≈ 17 MB,
  # but it is worth it if omitting those bits saves one byte in the DEFLATE
  # suffix. So try our absolute maximum limit minus 0, 258, 516, 774.
  best_limit <- argmax_fn(seq(limit, limit-1032, -258), function(cap) {
    QUOTED_DEFLATE_unzipped_size_given_max_uncompressed_size(cap, num_files, extra_quoting) /
      QUOTED_DEFLATE_zipped_size_given_max_uncompressed_size(cap, num_files, extra_quoting)
  })
  list(max_uncompressed_size=best_limit, num_files=num_files)
}
QUOTED_DEFLATE_64_optimize_for_zipped_size <- function(zipped_size, extra_quoting=FALSE) {
  # Search only a window around half the maximum feasible file count
  # (±5%, or at least ±10 files) rather than the full range.
  n_max <- (zipped_size-eocd_zip64)/(lfh_zip64+cdh_zip64+quot_ext)
  margin <- max(n_max * 0.05, 10)
  lo <- max(0, floor(n_max/2 - margin))
  hi <- min(n_max, floor(n_max/2 + margin))
  best_n <- argmax_fn(lo:hi, function(n) {
    payload <- zipped_size - QUOTED_64_headers_size(n, extra_quoting)
    QUOTED_DEFLATE_64_unzipped_size_given_compressed_size(payload, n, extra_quoting) /
      QUOTED_64_zipped_size_given_compressed_size(payload, n, extra_quoting)
  })
  list(compressed_size=zipped_size - QUOTED_64_headers_size(best_n, extra_quoting), num_files=best_n)
}
QUOTED_BZIP2_optimize_for_max_zipped_size <- function(max_zipped_size, extra_quoting=FALSE) {
  # bzip2 supports only extra-field quoting, so extra_quoting is mandatory.
  if (!extra_quoting) {
    stop()
  }
  ratio_for <- function(n) {
    payload <- BZIP2_compressed_size_given_max_compressed_size(max_zipped_size - QUOTED_headers_size(n, extra_quoting))
    QUOTED_BZIP2_unzipped_size_given_compressed_size(payload, n, extra_quoting) /
      QUOTED_zipped_size_given_compressed_size(payload, n, extra_quoting)
  }
  best_n <- argmax_fn(1:(max_zipped_size/(lfh+cdh+quot_ext)), ratio_for)
  best_payload <- BZIP2_compressed_size_given_max_compressed_size(max_zipped_size - QUOTED_headers_size(best_n, extra_quoting))
  list(compressed_size=best_payload, num_files=best_n)
}
QUOTED_BZIP2_optimize_for_num_files <- function(num_files, extra_quoting=TRUE) {
  # bzip2 supports only extra-field quoting, so extra_quoting is mandatory.
  if (!extra_quoting) {
    stop()
  }
  # bzip2 allows only extra-field quoting, not DEFLATE quoting, so there is no
  # file size expansion: each file may use the full 2^32 - 2 byte limit.
  list(max_uncompressed_size=2^32 - 2, num_files=num_files)
}
QUOTED_BZIP2_64_optimize_for_max_zipped_size <- function(max_zipped_size, extra_quoting=FALSE) {
  # bzip2 supports only extra-field quoting, so extra_quoting is mandatory.
  if (!extra_quoting) {
    stop()
  }
  ratio_for <- function(n) {
    payload <- BZIP2_compressed_size_given_max_compressed_size(max_zipped_size - QUOTED_64_headers_size(n, extra_quoting))
    QUOTED_BZIP2_64_unzipped_size_given_compressed_size(payload, n, extra_quoting) /
      QUOTED_64_zipped_size_given_compressed_size(payload, n, extra_quoting)
  }
  best_n <- argmax_fn(1:(max_zipped_size/(lfh_zip64+cdh_zip64+quot_ext)), ratio_for)
  best_payload <- BZIP2_compressed_size_given_max_compressed_size(max_zipped_size - QUOTED_64_headers_size(best_n, extra_quoting))
  list(compressed_size=best_payload, num_files=best_n)
}
# output
FULL_DEFLATE_needs_zip64 <- function(compressed_size, num_files) {
  # Zip64 is required once the 16-bit file count field or the 32-bit
  # uncompressed size field would overflow.
  too_many <- num_files > 0xfffe
  too_big <- DEFLATE_uncompressed_size_given_compressed_size(compressed_size) > 0xfffffffe
  too_many | too_big
}
FULL_BZIP2_needs_zip64 <- function(compressed_size, num_files) {
  # Same overflow test as FULL_DEFLATE_needs_zip64, for bzip2 streams.
  too_many <- num_files > 0xfffe
  too_big <- BZIP2_uncompressed_size_given_compressed_size(compressed_size) > 0xfffffffe
  too_many | too_big
}
QUOTED_DEFLATE_largest_file <- function(compressed_size, num_files, extra_quoting=FALSE) {
  # The largest file's uncompressed size grows beyond the raw DEFLATE output
  # by the quoted local headers and the DEFLATE-quoted filenames.
  n_ext <- if (extra_quoting) cmin(num_files - 1, max_num_quot_ext) else 0
  n_def <- cmax(num_files - 1 - n_ext, 0)
  base <- DEFLATE_uncompressed_size_given_compressed_size(compressed_size)
  base + lfh*n_def + sum_filename_lengths(num_files) - sum_filename_lengths(num_files-n_def)
}
QUOTED_DEFLATE_needs_zip64 <- function(compressed_size, num_files, extra_quoting=FALSE) {
  # Uses the largest-file size so that quoting expansion is accounted for.
  too_many <- num_files > 0xfffe
  too_big <- QUOTED_DEFLATE_largest_file(compressed_size, num_files, extra_quoting) > 0xfffffffe
  too_many | too_big
}
QUOTED_BZIP2_largest_file <- function(compressed_size, num_files, extra_quoting=FALSE) {
  # bzip2 supports only extra-field quoting, so extra_quoting is mandatory.
  if (!extra_quoting) {
    stop()
  }
  # No file size expansion with bzip2 (extra-field quoting only), so the
  # largest file is simply the uncompressed stream.
  BZIP2_uncompressed_size_given_compressed_size(compressed_size)
}
QUOTED_BZIP2_needs_zip64 <- function(compressed_size, num_files, extra_quoting=FALSE) {
  # NOTE(review): unlike the DEFLATE variant this does not also test
  # num_files > 0xfffe; callers appear to bound the file count separately.
  largest <- QUOTED_BZIP2_largest_file(compressed_size, num_files, extra_quoting)
  largest > 0xfffffffe
}
options(stringsAsFactors=FALSE, width=120)
# Accumulator for every (class, zipped, unzipped, label) point; starts as an
# empty data frame and grows one row per record() call.
data <- data.frame()
record <- function(class, zipped_size, unzipped_size, label="") {
  # Uncomment to trace each point as it is recorded:
  # print(c(class, zipped_size, unzipped_size, label))
  row <- list(class=class, zipped_size=zipped_size, unzipped_size=unzipped_size, label=label)
  data <<- rbind(data, row)
}
# Geometric sequence from `low` toward `high` with ratio `m`: each emitted
# value is the floor of the running (unfloored) value, and `high` itself is
# always appended as the final element.
geom_seq <- function(low, high, m) {
  out <- c()
  cur <- low
  while (cur < high) {
    out <- c(out, floor(cur))
    cur <- cur * m
  }
  c(out, high)
}
# Target zipped sizes, geometrically spaced (factor 1.5) from 250 bytes up to
# about 2 GB.
points <- floor(250*1.5^(0:39))
# none_deflate series.
# First sweep: fix the per-file uncompressed size at the 32-bit maximum and
# vary the number of files.
uncompressed_size <- DEFLATE_uncompressed_size_given_max_uncompressed_size(0xfffffffe)
compressed_size <- DEFLATE_compressed_size_given_uncompressed_size(uncompressed_size)
for (num_files in c(2^(0:15), 65534)) {
  zipped_size <- NONE_zipped_size_given_compressed_size(compressed_size, num_files)
  unzipped_size <- NONE_DEFLATE_unzipped_size_given_compressed_size(compressed_size, num_files)
  record("none_deflate", zipped_size, unzipped_size)
}
# Second sweep: a single file, growing the compressed size geometrically until
# the uncompressed size exceeds the classic 32-bit field limit.
compressed_size <- 250
while (TRUE) {
  zipped_size <- NONE_zipped_size_given_compressed_size(compressed_size, 1)
  unzipped_size <- NONE_DEFLATE_unzipped_size_given_compressed_size(compressed_size, 1)
  if (unzipped_size > 0xfffffffe)
    break
  record("none_deflate", zipped_size, unzipped_size)
  compressed_size <- floor(compressed_size * 1.5)
}
# Same single-file sweep with Zip64 headers, up to 2^62 bytes uncompressed.
compressed_size <- 250
while (TRUE) {
  zipped_size <- NONE_64_zipped_size_given_compressed_size(compressed_size, 1)
  unzipped_size <- NONE_DEFLATE_unzipped_size_given_compressed_size(compressed_size, 1)
  if (unzipped_size > 2^62)
    break
  record("none_deflate_zip64", zipped_size, unzipped_size)
  compressed_size <- floor(compressed_size * 1.5)
}
# none_bzip2 series: the same two sweeps as none_deflate, but with bzip2.
uncompressed_size <- BZIP2_uncompressed_size_given_max_uncompressed_size(0xfffffffe)
compressed_size <- BZIP2_compressed_size_given_uncompressed_size(uncompressed_size)
for (num_files in c(2^(0:15), 65534)) {
  zipped_size <- NONE_zipped_size_given_compressed_size(compressed_size, num_files)
  unzipped_size <- NONE_BZIP2_unzipped_size_given_compressed_size(compressed_size, num_files)
  record("none_bzip2", zipped_size, unzipped_size)
}
# Single file, classic headers: grow the compressed-size budget (converted to
# an achievable bzip2 compressed size) until the 32-bit limit is exceeded.
max_compressed_size <- 250
while (TRUE) {
  compressed_size <- BZIP2_compressed_size_given_max_compressed_size(max_compressed_size)
  zipped_size <- NONE_zipped_size_given_compressed_size(compressed_size, 1)
  unzipped_size <- NONE_BZIP2_unzipped_size_given_compressed_size(compressed_size, 1)
  if (unzipped_size > 0xfffffffe)
    break
  record("none_bzip2", zipped_size, unzipped_size)
  max_compressed_size <- floor(max_compressed_size * 1.5)
}
# Single file with Zip64 headers, up to 2^62 bytes uncompressed.
max_compressed_size <- 250
while (TRUE) {
  compressed_size <- BZIP2_compressed_size_given_max_compressed_size(max_compressed_size)
  zipped_size <- NONE_64_zipped_size_given_compressed_size(compressed_size, 1)
  unzipped_size <- NONE_BZIP2_unzipped_size_given_compressed_size(compressed_size, 1)
  if (unzipped_size > 2^62)
    break
  record("none_bzip2_zip64", zipped_size, unzipped_size)
  max_compressed_size <- floor(max_compressed_size * 1.5)
}
# full_deflate series: optimize the file count at each target zipped size;
# stop at the first point that would need Zip64.
for (zipped_size in points) {
  params <- FULL_DEFLATE_optimize_for_zipped_size(zipped_size)
  # Sanity check: the optimizer must reproduce the requested zipped size.
  if (zipped_size != FULL_zipped_size_given_compressed_size(params$compressed_size, params$num_files))
    stop("unexpected")
  if (FULL_DEFLATE_needs_zip64(params$compressed_size, params$num_files))
    break
  unzipped_size <- FULL_DEFLATE_unzipped_size_given_compressed_size(params$compressed_size, params$num_files)
  record("full_deflate", zipped_size, unzipped_size)
}
# Zip64 variant of the same sweep (no needs_zip64 cutoff).
for (zipped_size in points) {
  params <- FULL_DEFLATE_64_optimize_for_zipped_size(zipped_size)
  if (zipped_size != FULL_64_zipped_size_given_compressed_size(params$compressed_size, params$num_files))
    stop("unexpected")
  unzipped_size <- FULL_DEFLATE_unzipped_size_given_compressed_size(params$compressed_size, params$num_files)
  record("full_deflate_zip64", zipped_size, unzipped_size)
}
# full_bzip2 series: here the target is treated as an upper bound
# (max_zipped_size), so the exact zipped size is recomputed from the returned
# parameters rather than asserted.
for (max_zipped_size in points) {
  params <- FULL_BZIP2_optimize_for_max_zipped_size(max_zipped_size)
  if (FULL_BZIP2_needs_zip64(params$compressed_size, params$num_files))
    break
  zipped_size <- FULL_zipped_size_given_compressed_size(params$compressed_size, params$num_files)
  unzipped_size <- FULL_BZIP2_unzipped_size_given_compressed_size(params$compressed_size, params$num_files)
  record("full_bzip2", zipped_size, unzipped_size)
}
for (max_zipped_size in points) {
  params <- FULL_BZIP2_64_optimize_for_max_zipped_size(max_zipped_size)
  # NOTE(review): the deflate Zip64 loop above checks against
  # FULL_64_zipped_size_given_compressed_size, but this computes the recorded
  # size with the non-Zip64 FULL_zipped_size_given_compressed_size even though
  # the optimizer assumed Zip64 headers — confirm whether this is intentional.
  zipped_size <- FULL_zipped_size_given_compressed_size(params$compressed_size, params$num_files)
  unzipped_size <- FULL_BZIP2_unzipped_size_given_compressed_size(params$compressed_size, params$num_files)
  record("full_bzip2_zip64", zipped_size, unzipped_size)
}
# quoted_deflate series.
for (zipped_size in points) {
  params <- QUOTED_DEFLATE_optimize_for_zipped_size(zipped_size)
  if (zipped_size != QUOTED_zipped_size_given_compressed_size(params$compressed_size, params$num_files))
    stop("unexpected")
  if (QUOTED_DEFLATE_needs_zip64(params$compressed_size, params$num_files))
    break
  unzipped_size <- QUOTED_DEFLATE_unzipped_size_given_compressed_size(params$compressed_size, params$num_files)
  record("quoted_deflate", zipped_size, unzipped_size)
}
# 8319377 is smallest zipped_size that requires Zip64, from optimize.R.
# Record the largest point that still fits in classic (non-Zip64) headers.
with(list(params=QUOTED_DEFLATE_optimize_for_zipped_size(8319377-1)), {
  if (QUOTED_DEFLATE_needs_zip64(params$compressed_size, params$num_files))
    stop("unexpected")
  record(
    "quoted_deflate",
    QUOTED_zipped_size_given_compressed_size(params$compressed_size, params$num_files),
    QUOTED_DEFLATE_unzipped_size_given_compressed_size(params$compressed_size, params$num_files)
  )
})
# Zip64 continuation of the quoted_deflate curve.
for (zipped_size in points) {
  params <- QUOTED_DEFLATE_64_optimize_for_zipped_size(zipped_size)
  if (zipped_size != QUOTED_64_zipped_size_given_compressed_size(params$compressed_size, params$num_files))
    stop("unexpected")
  unzipped_size <- QUOTED_DEFLATE_64_unzipped_size_given_compressed_size(params$compressed_size, params$num_files)
  record("quoted_deflate_zip64", zipped_size, unzipped_size)
}
# quoted_deflate_extra series: same sweep with extra-field quoting enabled.
for (zipped_size in points) {
  params <- QUOTED_DEFLATE_optimize_for_zipped_size(zipped_size, extra_quoting=TRUE)
  if (zipped_size != QUOTED_zipped_size_given_compressed_size(params$compressed_size, params$num_files, extra_quoting=TRUE))
    stop("unexpected")
  if (QUOTED_DEFLATE_needs_zip64(params$compressed_size, params$num_files, extra_quoting=TRUE))
    break
  unzipped_size <- QUOTED_DEFLATE_unzipped_size_given_compressed_size(params$compressed_size, params$num_files, extra_quoting=TRUE)
  record("quoted_deflate_extra", zipped_size, unzipped_size)
}
# 8317713 is smallest zipped_size that requires Zip64 (with extra quoting),
# from optimize.R. Record the largest point that still fits in classic
# headers. Fix: this point belongs to the "quoted_deflate_extra" series, so
# extra_quoting=TRUE must be passed throughout; it was previously omitted
# (copy-paste from the non-extra exemplar above), which computed a plain
# quoted_deflate point under the extra-quoting threshold.
with(list(params=QUOTED_DEFLATE_optimize_for_zipped_size(8317713-1, extra_quoting=TRUE)), {
  if (QUOTED_DEFLATE_needs_zip64(params$compressed_size, params$num_files, extra_quoting=TRUE))
    stop("unexpected")
  record(
    "quoted_deflate_extra",
    QUOTED_zipped_size_given_compressed_size(params$compressed_size, params$num_files, extra_quoting=TRUE),
    QUOTED_DEFLATE_unzipped_size_given_compressed_size(params$compressed_size, params$num_files, extra_quoting=TRUE)
  )
})
# Zip64 continuation of the quoted_deflate_extra curve.
for (zipped_size in points) {
  params <- QUOTED_DEFLATE_64_optimize_for_zipped_size(zipped_size, extra_quoting=TRUE)
  if (zipped_size != QUOTED_64_zipped_size_given_compressed_size(params$compressed_size, params$num_files, extra_quoting=TRUE))
    stop("unexpected")
  unzipped_size <- QUOTED_DEFLATE_64_unzipped_size_given_compressed_size(params$compressed_size, params$num_files, extra_quoting=TRUE)
  record("quoted_deflate_zip64_extra", zipped_size, unzipped_size)
}
# quoted_bzip2_extra series (bzip2 requires extra-field quoting throughout).
for (max_zipped_size in points) {
  params <- QUOTED_BZIP2_optimize_for_max_zipped_size(max_zipped_size, extra_quoting=TRUE)
  # Stop once the file count exhausts the extra-field quoting capacity or the
  # sizes would need Zip64.
  if (params$num_files > max_num_quot_ext || QUOTED_BZIP2_needs_zip64(params$compressed_size, params$num_files, extra_quoting=TRUE))
    break
  zipped_size <- QUOTED_zipped_size_given_compressed_size(params$compressed_size, params$num_files, extra_quoting=TRUE)
  unzipped_size <- QUOTED_BZIP2_unzipped_size_given_compressed_size(params$compressed_size, params$num_files, extra_quoting=TRUE)
  record("quoted_bzip2_extra", zipped_size, unzipped_size)
}
# 5828 is smallest zipped_size that requires Zip64, from optimize.R.
# Record the largest non-Zip64 point exactly.
with(list(params=QUOTED_BZIP2_optimize_for_max_zipped_size(5828-1, extra_quoting=TRUE)), {
  if (QUOTED_BZIP2_needs_zip64(params$compressed_size, params$num_files, extra_quoting=TRUE))
    stop("unexpected")
  record(
    "quoted_bzip2_extra",
    QUOTED_zipped_size_given_compressed_size(params$compressed_size, params$num_files, extra_quoting=TRUE),
    QUOTED_BZIP2_unzipped_size_given_compressed_size(params$compressed_size, params$num_files, extra_quoting=TRUE)
  )
})
# Continue the quoted_bzip2_extra curve by file count, every file at its
# maximum uncompressed size.
for (num_files in geom_seq(36, max_num_quot_ext+1, 1.5)) {
  params <- QUOTED_BZIP2_optimize_for_num_files(num_files, extra_quoting=TRUE)
  zipped_size <- QUOTED_BZIP2_zipped_size_given_max_uncompressed_size(params$max_uncompressed_size, params$num_files, extra_quoting=TRUE)
  unzipped_size <- QUOTED_BZIP2_unzipped_size_given_max_uncompressed_size(params$max_uncompressed_size, params$num_files, extra_quoting=TRUE)
  record("quoted_bzip2_extra", zipped_size, unzipped_size)
}
# Zip64 variant, bounded by the Zip64 extra-field quoting capacity.
for (max_zipped_size in points) {
  params <- QUOTED_BZIP2_64_optimize_for_max_zipped_size(max_zipped_size, extra_quoting=TRUE)
  if (params$num_files > max_num_quot_ext_zip64+1)
    break
  zipped_size <- QUOTED_64_zipped_size_given_compressed_size(params$compressed_size, params$num_files, extra_quoting=TRUE)
  unzipped_size <- QUOTED_BZIP2_64_unzipped_size_given_compressed_size(params$compressed_size, params$num_files, extra_quoting=TRUE)
  record("quoted_bzip2_zip64_extra", zipped_size, unzipped_size)
}
# Extend past the quoting capacity: hold num_files at max_num_quot_ext_zip64+1
# and grow compressed_size geometrically. Note this relies on `params` left
# over from the loop above (the first point past its break).
compressed_size <- params$compressed_size
while (TRUE) {
  zipped_size <- QUOTED_64_zipped_size_given_compressed_size(compressed_size, max_num_quot_ext_zip64+1, extra_quoting=TRUE)
  unzipped_size <- QUOTED_BZIP2_64_unzipped_size_given_compressed_size(compressed_size, max_num_quot_ext_zip64+1, extra_quoting=TRUE)
  if (unzipped_size > 2^62)
    break
  record("quoted_bzip2_zip64_extra", zipped_size, unzipped_size)
  compressed_size <- floor(compressed_size * 1.5)
}
# Specific exemplars we want represented exactly (labels name the published
# archive files).
record("42_nonrec", 42374, 558432, "42.zip")
record("42_rec", 42374, 4507981343026016, "42.zip")
with(list(params=FULL_DEFLATE_optimize_for_zipped_size(42374)), record(
  "full_deflate",
  FULL_zipped_size_given_compressed_size(params$compressed_size, params$num_files),
  FULL_DEFLATE_unzipped_size_given_compressed_size(params$compressed_size, params$num_files),
  "overlap.zip"
))
with(list(params=QUOTED_DEFLATE_optimize_for_zipped_size(42374)), record(
  "quoted_deflate",
  QUOTED_zipped_size_given_compressed_size(params$compressed_size, params$num_files),
  QUOTED_DEFLATE_unzipped_size_given_compressed_size(params$compressed_size, params$num_files),
  "zbsm.zip"
))
with(list(params=QUOTED_DEFLATE_optimize_for_num_files(65534)), record(
  "quoted_deflate",
  QUOTED_DEFLATE_zipped_size_given_max_uncompressed_size(params$max_uncompressed_size, params$num_files),
  QUOTED_DEFLATE_unzipped_size_given_max_uncompressed_size(params$max_uncompressed_size, params$num_files),
  "zblg.zip"
))
with(list(params=QUOTED_DEFLATE_64_optimize_for_zipped_size(45876952)), record(
  "quoted_deflate_zip64",
  QUOTED_64_zipped_size_given_compressed_size(params$compressed_size, params$num_files),
  QUOTED_DEFLATE_64_unzipped_size_given_compressed_size(params$compressed_size, params$num_files),
  "zbxl.zip"
))
with(list(params=QUOTED_BZIP2_optimize_for_num_files(1809, extra_quoting=TRUE)), record(
  "quoted_bzip2_extra",
  QUOTED_BZIP2_zipped_size_given_max_uncompressed_size(params$max_uncompressed_size, params$num_files, extra_quoting=TRUE),
  QUOTED_BZIP2_unzipped_size_given_max_uncompressed_size(params$max_uncompressed_size, params$num_files, extra_quoting=TRUE),
  "zbbz2.zip"
))
# with(list(params=QUOTED_DEFLATE_64_optimize_for_zipped_size(2961656712)), record(
#   "quoted_deflate_zip64",
#   QUOTED_64_zipped_size_given_compressed_size(params$compressed_size, params$num_files),
#   QUOTED_DEFLATE_64_unzipped_size_given_compressed_size(params$compressed_size, params$num_files),
#   "zbxxl.zip"
# ))
# Drop degenerate points, then emit the final table to stdout as CSV.
data <- data[data$zipped_size > 0 & data$unzipped_size > 0, ]
write.csv(data, row.names=FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/url.R
\name{shape_url}
\alias{shape_url}
\title{shape_url}
\usage{
shape_url(base_url, ...)
}
\description{
Add query parameters, the user agent, and the current verify token to the URL.
}
| /man/shape_url.Rd | permissive | benjaminguinaudeau/tiktokr | R | false | true | 250 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/url.R
\name{shape_url}
\alias{shape_url}
\title{shape_url}
\usage{
shape_url(base_url, ...)
}
\description{
Add query parameters, the user agent, and the current verify token to the URL.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DBConnection.R
\name{dbGetQuery}
\alias{dbGetQuery}
\alias{dbGetQuery,DBIConnection,character-method}
\title{Send query, retrieve results and then clear result set.}
\usage{
dbGetQuery(conn, statement, ...)
}
\arguments{
\item{conn}{A \code{\linkS4class{DBIConnection}} object, as produced by
\code{\link{dbConnect}}.}
\item{statement}{a character vector of length 1 containing SQL.}
\item{...}{Other parameters passed on to methods.}
}
\description{
\code{dbGetQuery} comes with a default implementation that calls
\code{\link{dbSendQuery}}, then \code{\link{dbFetch}}, ensuring that
the result is always freed by \code{\link{dbClearResult}}.
}
\section{Implementation notes}{
Subclasses should override this method only if they provide some sort of
performance optimisation.
}
\examples{
if (require("RSQLite")) {
con <- dbConnect(RSQLite::SQLite(), ":memory:")
dbWriteTable(con, "mtcars", mtcars)
dbGetQuery(con, "SELECT * FROM mtcars")
dbBegin(con)
dbGetQuery(con, "DELETE FROM mtcars WHERE cyl == 4")
dbRollback(con)
dbDisconnect(con)
}
}
\seealso{
Other connection methods: \code{\link{dbDisconnect}},
\code{\link{dbExecute}}, \code{\link{dbExistsTable}},
\code{\link{dbGetChunkedQuery}},
\code{\link{dbGetException}}, \code{\link{dbListFields}},
\code{\link{dbListResults}}, \code{\link{dbListTables}},
\code{\link{dbReadTable}}, \code{\link{dbRemoveTable}},
\code{\link{dbSendQuery}}
}
| /man/dbGetQuery.Rd | no_license | bborgesr/DBI | R | false | true | 1,492 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DBConnection.R
\name{dbGetQuery}
\alias{dbGetQuery}
\alias{dbGetQuery,DBIConnection,character-method}
\title{Send query, retrieve results and then clear result set.}
\usage{
dbGetQuery(conn, statement, ...)
}
\arguments{
\item{conn}{A \code{\linkS4class{DBIConnection}} object, as produced by
\code{\link{dbConnect}}.}
\item{statement}{a character vector of length 1 containing SQL.}
\item{...}{Other parameters passed on to methods.}
}
\description{
\code{dbGetQuery} comes with a default implementation that calls
\code{\link{dbSendQuery}}, then \code{\link{dbFetch}}, ensuring that
the result is always freed by \code{\link{dbClearResult}}.
}
\section{Implementation notes}{
Subclasses should override this method only if they provide some sort of
performance optimisation.
}
\examples{
if (require("RSQLite")) {
con <- dbConnect(RSQLite::SQLite(), ":memory:")
dbWriteTable(con, "mtcars", mtcars)
dbGetQuery(con, "SELECT * FROM mtcars")
dbBegin(con)
dbGetQuery(con, "DELETE FROM mtcars WHERE cyl == 4")
dbRollback(con)
dbDisconnect(con)
}
}
\seealso{
Other connection methods: \code{\link{dbDisconnect}},
\code{\link{dbExecute}}, \code{\link{dbExistsTable}},
\code{\link{dbGetChunkedQuery}},
\code{\link{dbGetException}}, \code{\link{dbListFields}},
\code{\link{dbListResults}}, \code{\link{dbListTables}},
\code{\link{dbReadTable}}, \code{\link{dbRemoveTable}},
\code{\link{dbSendQuery}}
}
|
#' Generate an annual stack of forest change from GFC product
#'
#' Uses thresholded GFC data as output by \code{\link{threshold_gfc}} to make
#' an annualized layer stack of forest change. See Details for the class codes
#' used in the annual raster stack. The \code{\link{animate_annual}} function
#' can be used to produce an animation of forest change from the generated
#' layer stack.
#'
#' The output raster stack uses the following codes to describe forest change
#' at each pixel:
#' \tabular{lc}{
#' Nodata \tab 0 \cr
#' Forest \tab 1 \cr
#' Non-forest \tab 2 \cr
#' Forest loss \tab 3 \cr
#' Forest gain \tab 4 \cr
#' Forest loss and gain \tab 5 \cr
#' Water \tab 6 \cr
#' }
#'
#' @seealso \code{\link{threshold_gfc}}, \code{\link{animate_annual}}
#'
#' @export
#' @import raster
#' @param gfc thresholded extract of GFC product for a given AOI (see
#' \code{\link{threshold_gfc}})
#' @param dataset which version of the Hansen data to use
annual_stack <- function(gfc, dataset='GFC-2022-v1.10') {
  # The final year covered by the product is parsed out of the dataset name,
  # e.g. 'GFC-2022-v1.10' -> 2022. (str_extract comes from stringr, which is
  # not in this file's roxygen imports — presumably loaded elsewhere; verify.)
  data_year <- as.numeric(str_extract(dataset, '(?<=GFC-?)[0-9]{4}'))
  # Layer order expected from threshold_gfc() output.
  names(gfc) <- c('forest2000', 'lossyear', 'gain', 'lossgain', 'datamask')
  # Empty stack sharing the input's extent/resolution/CRS; layers added below.
  out <- raster(gfc)
  layer_names <- paste0('y', seq(2000, data_year))
  for (n in 1:length(layer_names)) {
    if (n == 1) {
      # Code forest as 1, non-forest as 2
      this_year <- gfc$forest2000
      this_year[this_year == 0] <- 2 # non-forest
    } else {
      # Start from the previous year's layer and overlay this year's changes.
      this_year <- raster(out, layer=(n-1))
      # Code forest loss (can't have loss in the first year, 2000).
      # NOTE(review): loss is matched with lossyear == n, where n == 2 is the
      # layer for calendar year 2001 — confirm this indexing against the
      # recoding performed by threshold_gfc().
      this_year[(gfc$lossyear == n) & !(gfc$gain)] <- 3 # loss
    }
    # Code gain (no years are attributed to gain). Gain can only occur
    # where loss does not also occur (loss == 0), as gain and loss is coded
    # separately below.
    this_year[gfc$gain & (gfc$lossyear == 0)] <- 4 # gain
    this_year[gfc$lossgain] <- 5 # loss and gain
    this_year[gfc$datamask == 2] <- 6 # water
    names(this_year) <- layer_names[n]
    out <- addLayer(out, this_year)
  }
  out[gfc$datamask == 0] <- 0 # missing
  # Attach a Jan 1 date to each layer so the stack can be animated over time.
  out <- setZ(out, as.Date(paste0(seq(2000, data_year), '-1-1')))
  names(out) <- layer_names
  return(out)
}
| /R/annual_stack.R | no_license | azvoleff/gfcanalysis | R | false | false | 2,363 | r | #' Generate an annual stack of forest change from GFC product
#'
#' Uses thresholded GFC data as output by \code{\link{threshold_gfc}} to make
#' an annualized layer stack of forest change. See Details for the class codes
#' used in the annual raster stack. The \code{\link{animate_annual}} function
#' can be used to produce an animation of forest change from the generated
#' layer stack.
#'
#' The output raster stack uses the following codes to describe forest change
#' at each pixel:
#' \tabular{lc}{
#' Nodata \tab 0 \cr
#' Forest \tab 1 \cr
#' Non-forest \tab 2 \cr
#' Forest loss \tab 3 \cr
#' Forest gain \tab 4 \cr
#' Forest loss and gain \tab 5 \cr
#' Water \tab 6 \cr
#' }
#'
#' @seealso \code{\link{threshold_gfc}}, \code{\link{animate_annual}}
#'
#' @export
#' @import raster
#' @param gfc thresholded extract of GFC product for a given AOI (see
#' \code{\link{threshold_gfc}})
#' @param dataset which version of the Hansen data to use
annual_stack <- function(gfc, dataset='GFC-2022-v1.10') {
  # The final year covered by the product is parsed out of the dataset name,
  # e.g. 'GFC-2022-v1.10' -> 2022. (str_extract comes from stringr, which is
  # not in this file's roxygen imports — presumably loaded elsewhere; verify.)
  data_year <- as.numeric(str_extract(dataset, '(?<=GFC-?)[0-9]{4}'))
  # Layer order expected from threshold_gfc() output.
  names(gfc) <- c('forest2000', 'lossyear', 'gain', 'lossgain', 'datamask')
  # Empty stack sharing the input's extent/resolution/CRS; layers added below.
  out <- raster(gfc)
  layer_names <- paste0('y', seq(2000, data_year))
  for (n in 1:length(layer_names)) {
    if (n == 1) {
      # Code forest as 1, non-forest as 2
      this_year <- gfc$forest2000
      this_year[this_year == 0] <- 2 # non-forest
    } else {
      # Start from the previous year's layer and overlay this year's changes.
      this_year <- raster(out, layer=(n-1))
      # Code forest loss (can't have loss in the first year, 2000).
      # NOTE(review): loss is matched with lossyear == n, where n == 2 is the
      # layer for calendar year 2001 — confirm this indexing against the
      # recoding performed by threshold_gfc().
      this_year[(gfc$lossyear == n) & !(gfc$gain)] <- 3 # loss
    }
    # Code gain (no years are attributed to gain). Gain can only occur
    # where loss does not also occur (loss == 0), as gain and loss is coded
    # separately below.
    this_year[gfc$gain & (gfc$lossyear == 0)] <- 4 # gain
    this_year[gfc$lossgain] <- 5 # loss and gain
    this_year[gfc$datamask == 2] <- 6 # water
    names(this_year) <- layer_names[n]
    out <- addLayer(out, this_year)
  }
  out[gfc$datamask == 0] <- 0 # missing
  # Attach a Jan 1 date to each layer so the stack can be animated over time.
  out <- setZ(out, as.Date(paste0(seq(2000, data_year), '-1-1')))
  names(out) <- layer_names
  return(out)
}
|
#' Establish a connection to the DataRobot modeling engine
#'
#' This function initializes a DataRobot session. To use DataRobot, you must connect to
#' your account. This can be done in three ways:
#' \itemize{
#' \item by passing an \code{endpoint} and \code{token} directly to \code{ConnectToDataRobot}
#' \item by having a YAML config file in $HOME/.config/datarobot/drconfig.yaml
#' \item by setting DATAROBOT_API_ENDPOINT and DATAROBOT_API_TOKEN environment variables
#' }
#' The three methods of authentication are given priority in that order (explicitly passing
#' parameters to the function will trump a YAML config file, which will trump the environment
#' variables.)
#' If you have a YAML config file or environment variables set, you will not need to
#' pass any parameters to \code{ConnectToDataRobot} in order to connect.
#'
#' @param endpoint character. URL specifying the DataRobot server to be used.
#' It depends on DataRobot modeling engine implementation (cloud-based, on-prem...) you are using.
#' Contact your DataRobot admin for endpoint to use and to turn on API access to your account.
#' The endpoint for DataRobot cloud accounts is https://app.datarobot.com/api/v2
#' @param token character. DataRobot API access token. It is unique for each DataRobot modeling
#' engine account and can be accessed using DataRobot webapp in Account profile section.
#' @param userAgentSuffix character. Additional text that is appended to the
#' User-Agent HTTP header when communicating with the DataRobot REST API. This
#' can be useful for identifying different applications that are built on top
#' of the DataRobot Python Client, which can aid debugging and help track
#' usage.
#' @param sslVerify logical. Whether to check the SSL certificate. Either
#' TRUE to check (default), FALSE to not check.
#' @param configPath character. Path to YAML config file specifying configuration
#' (token and endpoint).
#' @param username character. No longer supported.
#' @param password character. No longer supported.
#' @examples
#' \dontrun{
#' ConnectToDataRobot("https://app.datarobot.com/api/v2", "thisismyfaketoken")
#' ConnectToDataRobot(configPath = "~/.config/datarobot/drconfig.yaml")
#' }
#' @export
ConnectToDataRobot <- function(endpoint = NULL,
                               token = NULL,
                               username = NULL,
                               password = NULL,
                               userAgentSuffix = NULL,
                               sslVerify = TRUE,
                               configPath = NULL
                               ) {
  # Environment-variable fallback, consulted only when no explicit
  # credentials or config file are supplied.
  endpointFromEnv <- Sys.getenv("DATAROBOT_API_ENDPOINT", unset = NA)
  tokenFromEnv <- Sys.getenv("DATAROBOT_API_TOKEN", unset = NA)
  usingToken <- !is.null(token)
  usingUserPass <- (!is.null(username)) || (!is.null(password))
  usingConfigFile <- !is.null(configPath)
  methodCount <- usingToken + usingConfigFile + usingUserPass
  # Side settings are recorded before attempting any connection.
  if (!is.null(userAgentSuffix)) {
    SaveUserAgentSuffix(userAgentSuffix)
  }
  SaveSSLVerifyPreference(sslVerify)
  # Dispatch in priority order: explicit token, username/password (which now
  # errors with guidance), config file, then environment variables.
  if (methodCount > 1) {
    stop("Please provide only one of: config file or token.")
  } else if (usingToken) {
    ConnectWithToken(endpoint, token)
  } else if (usingUserPass) {
    ConnectWithUsernamePassword(endpoint, username, password)
  } else if (usingConfigFile) {
    ConnectWithConfigFile(configPath)
  } else if (!is.na(endpointFromEnv) & !is.na(tokenFromEnv)) {
    ConnectWithToken(endpointFromEnv, tokenFromEnv)
  } else {
    stop(strwrap("No authentication method provided."), call. = FALSE)
  }
}
GetDefaultConfigPath <- function() {
  # Default config location: $HOME/.config/datarobot/drconfig.yaml.
  home <- Sys.getenv("HOME")
  file.path(home, ".config", "datarobot", "drconfig.yaml")
}
ConnectWithConfigFile <- function(configPath) {
  config <- yaml::yaml.load_file(configPath)
  # Config keys arrive in snake_case while ConnectToDataRobot() takes
  # camelCase arguments, so each supported option is mapped by hand; with so
  # few options a programmatic mapping isn't worth it.
  sslOption <- config$ssl_verify
  if (!is.null(sslOption) &&
      (length(sslOption) != 1 || !is.logical(sslOption))) {
    stop("ssl_verify must be either unset or set as either TRUE or FALSE.")
  }
  ConnectToDataRobot(endpoint = config$endpoint,
                     token = config$token,
                     username = config$username,
                     password = config$password,
                     userAgentSuffix = config$user_agent_suffix,
                     sslVerify = sslOption)
}
SetSSLVerification <- function() {
  # Disable certificate verification only when explicitly opted out via the
  # DataRobot_SSL_Verify env var (written by SaveSSLVerifyPreference()).
  optedOut <- identical(Sys.getenv("DataRobot_SSL_Verify"), "FALSE")
  if (optedOut) {
    httr::set_config(httr::config(ssl_verifypeer = 0L, ssl_verifyhost = 0L))
  }
}
# Verify `token` against `endpoint` by requesting the projects listing, then
# persist both for subsequent API calls.
ConnectWithToken <- function(endpoint, token) {
  authHead <- paste("Token", token, sep = " ")
  subUrl <- paste("/", "projects/", sep = "")
  fullURL <- paste(endpoint, subUrl, sep = "")
  SetSSLVerification()
  rawReturn <- httr::GET(fullURL, DataRobotAddHeaders(Authorization = authHead))
  # Strip the probe path back off the (possibly redirected) response URL so it
  # can be reported against the endpoint the caller supplied.
  newURL <- gsub(subUrl, "", rawReturn$url)
  StopIfDenied(rawReturn)
  # If the server redirected away from the supplied endpoint, the endpoint is
  # wrong (e.g. missing /api/v2); fail with the corrected URL.
  if (!grepl(endpoint, rawReturn$url, fixed = TRUE)) {
    errorMsg <- paste("Specified endpoint ", endpoint, " is not correct.
Was redirected to ", newURL, sep = "")
    stop(errorMsg, call. = FALSE)
  }
  out <- SaveConnectionEnvironmentVars(endpoint, token)
  VersionWarning()
  RStudioConnectionOpened(endpoint, token)
  # Return the Sys.setenv result invisibly, like other save helpers.
  invisible(out)
}
# Username/password authentication was removed from the DataRobot API; this
# stub exists only to give callers of the old code path an actionable error.
# The signature is kept for backward compatibility; the arguments are
# intentionally unused.
#
# Fix: the original passed stop() a single multi-line string literal whose
# embedded newlines and source indentation leaked verbatim into the error
# message; the message is now assembled from clean fragments.
ConnectWithUsernamePassword <- function(endpoint, username, password) {
  stop(paste("Using your username/password to authenticate with the DataRobot",
             "API is no longer supported. Please supply your API token",
             "instead. You can find your API token in your account profile",
             "in the DataRobot web app."),
       call. = FALSE)
}
# Record validated connection details in environment variables so later API
# calls in this process can find them.  Emits a status message; returns the
# result of the final Sys.setenv() call (callers such as ConnectWithToken()
# wrap the result in invisible()).
SaveConnectionEnvironmentVars <- function(endpoint, token) {
message("Authentication token saved")
Sys.setenv(DATAROBOT_API_ENDPOINT = endpoint)
Sys.setenv(DATAROBOT_API_TOKEN = token)
}
# Persist the extra User-Agent text in an environment variable; presumably
# it is read back when HTTP headers are assembled for REST requests --
# the consumer is outside this file (TODO confirm).
SaveUserAgentSuffix <- function(suffix) {
Sys.setenv(DataRobot_User_Agent_Suffix = suffix)
}
# Validate and store the SSL-verification preference for later use by
# SetSSLVerification().  A NULL preference is a no-op; anything that is
# not a single logical value is rejected.
SaveSSLVerifyPreference <- function(sslVerify) {
  if (is.null(sslVerify)) {
    return(invisible(NULL))
  }
  isSingleFlag <- length(sslVerify) == 1 && is.logical(sslVerify)
  if (!isSingleFlag) {
    stop("sslVerify must be unset or be TRUE or FALSE.")
  }
  Sys.setenv(DataRobot_SSL_Verify = sslVerify)
}
# Raise an error when the authentication probe came back with an HTTP error
# status (>= 400), including the parsed server response in the message.
# No-op for successful responses.
StopIfDenied <- function(rawReturn) {
returnStatus <- httr::status_code(rawReturn)
if (returnStatus >= 400) {
# ParseReturnResponse is a project helper; unlist() flattens its output so
# it can be pasted into a single message string.
response <- unlist(ParseReturnResponse(rawReturn))
errorMsg <- paste("Authorization request denied: ", response)
stop(strwrap(errorMsg), call. = FALSE)
}
}
# Compare the client package version against the server's API version:
# a major-version mismatch is a hard error; a client minor version ahead of
# the server produces a warning.
#
# Fix: the original wrote `if (is.null(serverVer)) { invisible(NULL) }`
# WITHOUT returning, so when the server version was unavailable execution
# fell through to `serverVer$major` and the `if` crashed on a zero-length
# condition.  An explicit early return makes NULL a clean no-op.
VersionWarning <- function() {
  clientVer <- GetClientVersion()
  serverVer <- GetServerVersion()
  if (is.null(serverVer)) {
    # GetServerVersion() already warned; nothing to compare against.
    return(invisible(NULL))
  }
  if (clientVer$major != serverVer$major) {
    errMsg <-
      paste("\n Client and server versions are incompatible. \n Server version: ",
            serverVer$versionString, "\n Client version: ", clientVer)
    stop(errMsg)
  }
  if (clientVer$minor > serverVer$minor) {
    warMsg <-
      paste("Client version is ahead of server version, you may have incompatibilities")
    warning(warMsg, call. = FALSE)
  }
  invisible(NULL)
}
# Ask the server for its API version (GET /version/).
#
# @return The parsed response from DataRobotGET(), or NULL (with a warning)
#   when the endpoint cannot supply a version -- typically a misconfigured
#   endpoint or a server that predates API versioning.
#
# Cleanup: the original funneled everything through dead `ver <-`
# assignments (including one inside the error handler, which only assigned
# into the handler's own environment); the tryCatch() value is now returned
# directly and visibly.
GetServerVersion <- function() {
  dataRobotUrl <- Sys.getenv("DATAROBOT_API_ENDPOINT")
  errorMessage <-
    paste("Server did not reply with an API version. This may indicate the endpoint ", dataRobotUrl,
          "\n is misconfigured, or that the server API version precedes this version \n ",
          "of the DataRobot client package and is likely incompatible.")
  tryCatch({
    routeString <- UrlJoin("version")
    DataRobotGET(routeString, addUrl = TRUE)
  },
  ConfigError = function(e) {
    warning(errorMessage)
    NULL
  })
}
# Version of the installed client package.
#
# @param pkg character. Name of the installed package to query.  Defaults to
#   "datarobot", which keeps existing zero-argument callers working; the
#   parameter generalizes the original hard-coded lookup.
# @return A `package_version` object (from utils::packageVersion()).
GetClientVersion <- function(pkg = "datarobot") {
  packageVersion(pkg)
}
| /R/ConnectToDataRobot.R | no_license | bgreenwell/datarobot | R | false | false | 8,005 | r | #' Establish a connection to the DataRobot modeling engine
#'
#' This function initializes a DataRobot session. To use DataRobot, you must connect to
#' your account. This can be done in three ways:
#' \itemize{
#' \item by passing an \code{endpoint} and \code{token} directly to \code{ConnectToDataRobot}
#' \item by having a YAML config file in $HOME/.config/datarobot/drconfig.yaml
#' \item by setting DATAROBOT_API_ENDPOINT and DATAROBOT_API_TOKEN environment variables
#' }
#' The three methods of authentication are given priority in that order (explicitly passing
#' parameters to the function will trump a YAML config file, which will trump the environment
#' variables.)
#' If you have a YAML config file or environment variables set, you will not need to
#' pass any parameters to \code{ConnectToDataRobot} in order to connect.
#'
#' @param endpoint character. URL specifying the DataRobot server to be used.
#' It depends on DataRobot modeling engine implementation (cloud-based, on-prem...) you are using.
#' Contact your DataRobot admin for endpoint to use and to turn on API access to your account.
#' The endpoint for DataRobot cloud accounts is https://app.datarobot.com/api/v2
#' @param token character. DataRobot API access token. It is unique for each DataRobot modeling
#' engine account and can be accessed using DataRobot webapp in Account profile section.
#' @param userAgentSuffix character. Additional text that is appended to the
#' User-Agent HTTP header when communicating with the DataRobot REST API. This
#' can be useful for identifying different applications that are built on top
#' of the DataRobot Python Client, which can aid debugging and help track
#' usage.
#' @param sslVerify logical. Whether to check the SSL certificate. Either
#' TRUE to check (default), FALSE to not check.
#' @param configPath character. Path to YAML config file specifying configuration
#' (token and endpoint).
#' @param username character. No longer supported.
#' @param password character. No longer supported.
#' @examples
#' \dontrun{
#' ConnectToDataRobot("https://app.datarobot.com/api/v2", "thisismyfaketoken")
#' ConnectToDataRobot(configPath = "~/.config/datarobot/drconfig.yaml")
#' }
#' @export
ConnectToDataRobot <- function(endpoint = NULL,
                               token = NULL,
                               username = NULL,
                               password = NULL,
                               userAgentSuffix = NULL,
                               sslVerify = TRUE,
                               configPath = NULL
                               ) {
  # Environment-variable fallback, consulted only when no explicit
  # authentication method was supplied.
  envEndpoint <- Sys.getenv("DATAROBOT_API_ENDPOINT", unset = NA)
  envToken <- Sys.getenv("DATAROBOT_API_TOKEN", unset = NA)
  # Work out which of the mutually exclusive authentication methods were
  # provided so we can reject ambiguous combinations.
  haveToken <- !is.null(token)
  haveUsernamePassword <- (!is.null(username)) || (!is.null(password))
  haveConfigPath <- !is.null(configPath)
  numAuthMethodsProvided <- haveToken + haveConfigPath + haveUsernamePassword
  if (!is.null(userAgentSuffix)) {
    SaveUserAgentSuffix(userAgentSuffix)
  }
  SaveSSLVerifyPreference(sslVerify)
  if (numAuthMethodsProvided > 1) {
    stop("Please provide only one of: config file or token.")
  } else if (haveToken) {
    ConnectWithToken(endpoint, token)
  } else if (haveUsernamePassword) {
    # Kept for backward compatibility; this path always errors with guidance.
    ConnectWithUsernamePassword(endpoint, username, password)
  } else if (haveConfigPath) {
    ConnectWithConfigFile(configPath)
  } else if (!is.na(envEndpoint) && !is.na(envToken)) {
    # Fix: scalar `if` condition, so use the short-circuit `&&` rather than
    # the elementwise `&` the original used.
    ConnectWithToken(envEndpoint, envToken)
  } else {
    errorMsg <- "No authentication method provided."
    stop(strwrap(errorMsg), call. = FALSE)
  }
}
GetDefaultConfigPath <- function() {
file.path(Sys.getenv("HOME"), ".config", "datarobot", "drconfig.yaml")
}
ConnectWithConfigFile <- function(configPath) {
config <- yaml::yaml.load_file(configPath)
# Since the options we get from the config come in snake_case, but ConnectToDataRobot()
# wants camelCase arguments, we manually map the config options to their correct argument.
# We _could_ do this programmatically, but with the small number of options we support,
# it doesn't seem worth it.
if (!is.null(config$ssl_verify) &&
(length(config$ssl_verify) != 1 || !is.logical(config$ssl_verify))) {
stop("ssl_verify must be either unset or set as either TRUE or FALSE.")
}
ConnectToDataRobot(endpoint = config$endpoint, token = config$token, username = config$username,
password = config$password, userAgentSuffix = config$user_agent_suffix,
sslVerify = config$ssl_verify)
}
SetSSLVerification <- function() {
sslVerify <- Sys.getenv("DataRobot_SSL_Verify")
if (identical(sslVerify, "FALSE")) {
httr::set_config(httr::config(ssl_verifypeer = 0L, ssl_verifyhost = 0L))
}
}
ConnectWithToken <- function(endpoint, token) {
authHead <- paste("Token", token, sep = " ")
subUrl <- paste("/", "projects/", sep = "")
fullURL <- paste(endpoint, subUrl, sep = "")
SetSSLVerification()
rawReturn <- httr::GET(fullURL, DataRobotAddHeaders(Authorization = authHead))
newURL <- gsub(subUrl, "", rawReturn$url)
StopIfDenied(rawReturn)
if (!grepl(endpoint, rawReturn$url, fixed = TRUE)) {
errorMsg <- paste("Specified endpoint ", endpoint, " is not correct.
Was redirected to ", newURL, sep = "")
stop(errorMsg, call. = FALSE)
}
out <- SaveConnectionEnvironmentVars(endpoint, token)
VersionWarning()
RStudioConnectionOpened(endpoint, token)
invisible(out)
}
ConnectWithUsernamePassword <- function(endpoint, username, password) {
stop("Using your username/password to authenticate with the DataRobot API is no longer supported.
Please supply your API token instead. You can find your API token in your account profile in
the DataRobot web app.")
}
SaveConnectionEnvironmentVars <- function(endpoint, token) {
message("Authentication token saved")
Sys.setenv(DATAROBOT_API_ENDPOINT = endpoint)
Sys.setenv(DATAROBOT_API_TOKEN = token)
}
SaveUserAgentSuffix <- function(suffix) {
Sys.setenv(DataRobot_User_Agent_Suffix = suffix)
}
SaveSSLVerifyPreference <- function(sslVerify) {
if (!is.null(sslVerify)) {
if (length(sslVerify) != 1 || !is.logical(sslVerify)) {
stop("sslVerify must be unset or be TRUE or FALSE.")
}
Sys.setenv(DataRobot_SSL_Verify = sslVerify)
}
}
StopIfDenied <- function(rawReturn) {
returnStatus <- httr::status_code(rawReturn)
if (returnStatus >= 400) {
response <- unlist(ParseReturnResponse(rawReturn))
errorMsg <- paste("Authorization request denied: ", response)
stop(strwrap(errorMsg), call. = FALSE)
}
}
# Compare the client package version against the server's API version:
# a major-version mismatch is a hard error; a client minor version ahead of
# the server produces a warning.
#
# Fix: the original wrote `if (is.null(serverVer)) { invisible(NULL) }`
# WITHOUT returning, so when the server version was unavailable execution
# fell through to `serverVer$major` and the `if` crashed on a zero-length
# condition.  An explicit early return makes NULL a clean no-op.
VersionWarning <- function() {
  clientVer <- GetClientVersion()
  serverVer <- GetServerVersion()
  if (is.null(serverVer)) {
    # GetServerVersion() already warned; nothing to compare against.
    return(invisible(NULL))
  }
  if (clientVer$major != serverVer$major) {
    errMsg <-
      paste("\n Client and server versions are incompatible. \n Server version: ",
            serverVer$versionString, "\n Client version: ", clientVer)
    stop(errMsg)
  }
  if (clientVer$minor > serverVer$minor) {
    warMsg <-
      paste("Client version is ahead of server version, you may have incompatibilities")
    warning(warMsg, call. = FALSE)
  }
  invisible(NULL)
}
GetServerVersion <- function() {
dataRobotUrl <- Sys.getenv("DATAROBOT_API_ENDPOINT")
errorMessage <-
paste("Server did not reply with an API version. This may indicate the endpoint ", dataRobotUrl,
"\n is misconfigured, or that the server API version precedes this version \n ",
"of the DataRobot client package and is likely incompatible.")
ver <- tryCatch({routeString <- UrlJoin("version")
modelInfo <- DataRobotGET(routeString, addUrl = TRUE)
},
ConfigError = function(e) {
warning(errorMessage)
ver <- NULL
})
}
GetClientVersion <- function() {
ver <- packageVersion("datarobot")
}
|
# Read a linear program from `model` (default: CPLEX LP format), solve it
# with Rglpk, and return:
#   sol   - the raw Rglpk_solve_LP() result,
#   df    - a one-column data frame of variable values plus the objective,
#   latex - an xtable (LaTeX) rendering of that data frame.
SolverLP <- function(model, method = "CPLEX_LP", decimal = 0) {
  library(Rglpk)
  lp <- Rglpk_read_file(model, type = method, verbose = FALSE)
  sol <- Rglpk_solve_LP(lp$objective,
                        lp$constraints[[1]],
                        lp$constraints[[2]],
                        lp$constraints[[3]],
                        lp$bounds,
                        lp$types,
                        lp$maximum)
  library(xtable)
  # One row per decision variable, with the optimal objective appended as
  # a final "obj" row.
  solution_df <- as.data.frame(sol$solution)
  solution_df <- rbind(solution_df, c(sol$optimum))
  rownames(solution_df) <- c(attr(lp, "objective_vars_names"), "obj")
  colnames(solution_df) <- "Solution"
  latex_table <- xtable(solution_df, digits = decimal)
  list(sol = sol, df = solution_df, latex = latex_table)
}
modela<-SolverLP("furnituretypes_first")
getwd()
| /bookex.R | no_license | psmgeelen/businessanalysis | R | false | false | 907 | r | SolverLP<-function(model,method="CPLEX_LP",decimal=0)
{
library(Rglpk)
model1.lp<-Rglpk_read_file(model,type=method,
verbose=F)
model1.lp.sol<-Rglpk_solve_LP(model1.lp$objective,
model1.lp$constraints[[1]],model1.lp$constraints
[[2]],model1.lp$constraints[[3]],model1.lp$bounds,
model1.lp$types,model1.lp$maximum)
library(xtable)
model1.lp.sol.df<-as.data.frame(model1.lp.sol$solution)
model1.lp.sol.df<-rbind(model1.lp.sol.df,c(model1.lp.sol$optimum))
rownames(model1.lp.sol.df)<-c(attr(model1.lp,"objective_vars_names"),"obj")
colnames(model1.lp.sol.df)<-"Solution"
table.sol<-xtable(model1.lp.sol.df,digits=decimal)
results<-list(sol=model1.lp.sol,df=model1.lp.sol.df,latex=table.sol)
return(results)
}
modela<-SolverLP("furnituretypes_first")
getwd()
|
source(file.path(path.package('swirl'), 'Courses', 'R_ProgrammingDR', 'basis.R'))
# swirl custom-test hook: delegates to the project-defined
# submit_dbs_on_demand() (sourced from basis.R above) for this lesson.
dbs_on_demand <- function() {
return(submit_dbs_on_demand('r_workspaces_and_files'))
}
| /Workspace_and_Files/customTests.R | no_license | darrenredmond/R_ProgrammingDR | R | false | false | 172 | r | source(file.path(path.package('swirl'), 'Courses', 'R_ProgrammingDR', 'basis.R'))
dbs_on_demand <- function() {
return(submit_dbs_on_demand('r_workspaces_and_files'))
}
|
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
# Shiny server: scatterplot of two user-selected iris columns.  The UI's
# letter codes ("a".."d" for x, "e".."h" for y) map to iris column indices
# 1..4.  The original used two four-branch if-chains that left `i`/`j`
# undefined (an "object not found" error) for unrecognized codes; switch()
# expresses the same mapping in one line per axis.
shinyServer(function(input, output) {
  output$distPlot <- renderPlot({
    # switch() yields NULL for an unmatched code, making the failure mode
    # explicit instead of referencing an undefined variable.
    i <- switch(input$x, a = 1, b = 2, c = 3, d = 4)
    j <- switch(input$y, e = 1, f = 2, g = 3, h = 4)
    s <- iris[, i]
    k <- iris[, j]
    plot(s, k)
  })
})
| /server.R | no_license | rpkon/DDP-Week4 | R | false | false | 710 | r |
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
shinyServer(function(input, output) {
output$distPlot <- renderPlot({
if(input$x=='a'){
i<-1
}
if(input$x=='b'){
i<-2
}
if(input$x=='c'){
i<-3
}
if(input$x=='d'){
i<-4
}
if(input$y=='e'){
j<-1
}
if(input$y=='f'){
j<-2
}
if(input$y=='g'){
j<-3
}
if(input$y=='h'){
j<-4
}
s <- iris[, i]
k <- iris[, j]
plot(s,k)
})
})
|
# created on 20/03/20
# load packages
require(mxnet)
require(raster)
require(ggplot2)
.pardefault <- par()
dataDir = "/home/leguilln/workspace/birds_and_landscape/birds_and_landscape_patterns/data"
dataPath = paste(dataDir, 'data_birds_landscape_CNN.RData', sep="/")
load(dataPath)
setwd(dataDir)
modelName = "test_cae_best_4"
train = FALSE
loadModel = "test_cae_best_4" #NULL
iteration = 0
n_it = 5000
newBirdsArray = array( unlist(birds_array[1,,]) , dim=dim(birds_array[1,,]) )
dimnames(newBirdsArray) = dimnames(birds_array)[2:3]
inY = colnames(newBirdsArray)
inX = as.character(dimnames(raster_array)[3][[1]])
inXandY = intersect(inX,inY)
inXandYnotNa = setdiff(inXandY,raster_with_na)
Xarray= raster_array[,,dimnames(raster_array)[3][[1]]%in%inXandYnotNa]
dim(Xarray) = c(dim(Xarray)[1],dim(Xarray)[2],1,dim(Xarray)[3])
MainDevice = mx.cpu()
# create train, test and validation data sets
nRasters = dim(Xarray)[4]
nValid = 500
nTest = 100
bag = 1:nRasters
trainSample = sample(bag, nRasters - nValid - nTest)
validSample = sample( setdiff(bag,trainSample) , nValid )
testSample = setdiff( setdiff(bag,trainSample) , validSample)
Xtrain = Xarray[,,,trainSample,drop=F]
Xvalid = Xarray[,,,validSample,drop=F]
Xtest = Xarray[,,,testSample,drop=F]
# save(Xtrain, Xvalid, Xtest, file = "train_valid_test.RData")
load("train_valid_test.RData")
### define the convolutional autoencoder architectur e
data = mx.symbol.Variable(name = "data")
label = mx.symbol.Variable(name = "label")
## the encoder part of the network is made of 2 max-pooling convolutional layers
# 1st convolutional layer
conv1 = mx.symbol.Convolution(data = data,
kernel = c(3, 3),
pad = c(2, 2),
num_filter = 16,
name = "conv1")
relu1 = mx.symbol.Activation(data = conv1,
act_type = "relu",
name = "relu1")
pool1 = mx.symbol.Pooling(data = relu1,
pool_type = "max",
kernel = c(2,2),
stride=c(2,2),
name = "pool1")
# 2nd convolutional layer
conv2 = mx.symbol.Convolution(data = pool1,
kernel = c(3, 3),
pad = c(1, 1),
num_filter = 4,
name = "conv2")
relu2 = mx.symbol.Activation(data = conv2,
act_type = "relu",
name = "relu2")
pool2 = mx.symbol.Pooling(data = relu2,
pool_type = "max",
kernel = c(2,2),
stride=c(2,2),
name = "pool2")
## the decoder part of the network is made of 2 transposed convolutional layers
# 1st transposed convolutional layer
deconv1 = mx.symbol.Deconvolution(data = pool2,
kernel = c(2, 2),
stride = c(2, 2),
num_filter = 16,
layout='NCHW',
name = "deconv1")
# 2nd transposed convolutional layer
deconv2 = mx.symbol.Deconvolution(data = deconv1,
kernel = c(2, 2),
stride = c(2, 2),
num_filter = 1,
layout='NCHW',
name = "deconv2")
# mean squared error
loss <- mx.symbol.LinearRegressionOutput(deconv2, name = "loss")
### mxnet utilities
# Build an mxnet epoch-end callback that combines (a) threshold-based early
# stopping on the train/eval metric, (b) patience-based early stopping after
# `bad.steps` epochs without improvement, and (c) checkpointing the model to
# `<prefix>-0.params` whenever the eval metric improves.
# Returning FALSE from the inner callback tells mxnet to stop training.
# NOTE(review): best-so-far state is kept in the GLOBALS mx.best.score /
# mx.best.iter via `<<-` (the surrounding script relies on this to resume
# training); the callback is therefore not safe to use for two concurrent
# training runs in one session.
mx.callback.early.stop.and.save.checkpoint <- function(train.metric = NULL, eval.metric = NULL, bad.steps = NULL, maximize = FALSE, verbose = FALSE, prefix = "") {
function (iteration, nbatch, env, verbose = verbose)
{
# --- (a) absolute thresholds: stop as soon as a metric crosses its target.
if (!is.null(env$metric)) {
if (!is.null(train.metric)) {
result <- env$metric$get(env$train.metric)
if ((!maximize && result$value < train.metric) ||
(maximize && result$value > train.metric)) {
return(FALSE)
}
}
if (!is.null(eval.metric)) {
if (!is.null(env$eval.metric)) {
result <- env$metric$get(env$eval.metric)
if ((!maximize && result$value < eval.metric) ||
(maximize && result$value > eval.metric)) {
return(FALSE)
}
}
}
}
# --- (b)+(c) patience / checkpointing on the eval metric.
if (!is.null(bad.steps)) {
if (iteration == 1) {
# Reset the global best-so-far trackers at the start of a run.
mx.best.iter <<- 1
if (maximize) {
mx.best.score <<- 0
}
else {
mx.best.score <<- Inf
}
}
if (!is.null(env$eval.metric)) {
result <- env$metric$get(env$eval.metric)
if ((!maximize && result$value > mx.best.score) ||
(maximize && result$value < mx.best.score)) {
# Metric got worse: either we have exhausted the patience budget...
if (mx.best.iter == bad.steps) {
if (verbose) {
message("Best score=", mx.best.score, ", iteration [",
iteration - bad.steps, "]")
}
return(FALSE)
}
else {
# ...or we just burn one more "bad step".
mx.best.iter <<- mx.best.iter + 1
}
}
else {
# Metric improved (or matched): record it, reset patience, checkpoint.
mx.best.score <<- result$value
mx.best.iter <<- 1
mx.model.save(env$model, prefix, 0)
cat(sprintf("Model checkpoint saved to %s-0.params\n", prefix))
}
}
}
return(TRUE)
}
}
### learn and save model
# Training parameters
bad.steps = 50
batch.size = 32
saveDir = dataDir
prefix = paste(saveDir, modelName, sep="/")
if(!is.null(loadModel)) {
# OR load pre-trained model
setwd(saveDir)
model = mx.model.load(modelName, iteration = 0)
}
if(train == TRUE) {
if(is.null(loadModel)) {
# Randomly initialize the model weights
mx.set.seed(2019)
model = mx.model.FeedForward.create(symbol=loss,
X=Xtrain,
y=Xtrain,
eval.data=list(data=Xvalid, label=Xvalid),
ctx=MainDevice,
begin.round=1,
num.round=n_it,
array.batch.size=batch.size,
optimizer="adagrad",
initializer=mx.init.Xavier(),
eval.metric=mx.metric.mse,
epoch.end.callback=mx.callback.early.stop.and.save.checkpoint(bad.steps=bad.steps, prefix=prefix, verbose=TRUE)
)
} else {
#continue training
mx.best.iter <<- 0
model = mx.model.FeedForward.create(model$symbol,
X=Xtrain,
eval.data=list(data=Xvalid, label=Xvalid),
ctx=MainDevice,
begin.round=iteration,
num.round=n_it,
array.batch.size=batch.size,
optimizer="adagrad",
eval.metric=mx.metric.mse,
arg.params=model$arg.params,
aux.params=model$aux.params,
epoch.end.callback=mx.callback.early.stop.and.save.checkpoint(bad.steps=bad.steps, prefix=prefix, verbose=TRUE)
)
}
}
predicted = predict(model, X=Xtest)
par(mfrow=c(3,2))
for(i in 1:3) {
image(Xtest[,,1,i], useRaster=TRUE, axes=FALSE)
image(predicted[,,1,i], useRaster=TRUE, axes=FALSE)
}
# Run only the ENCODER half of the trained autoencoder over `input`,
# returning the flattened latent representation for each sample.
# The symbol graph below mirrors the encoder layers defined earlier in this
# script (conv1/pool1/conv2/pool2) plus a final flatten; the trained weights
# are supplied via arg.params rather than baked into the symbols.
# NOTE(review): the weight/bias arguments on the Convolution calls are
# commented out -- presumably mxnet matches arg.params entries to layers by
# name ("conv1_weight" etc.), which is why the layer names here must match
# those used at training time; confirm against the mxnet docs.
encode <- function(input, model)
{
arg.params = model$arg.params[c("conv1_weight", "conv1_bias", "conv2_weight", "conv2_bias")]
data = mx.symbol.Variable("data")
# 1st convolutional layer
conv1 = mx.symbol.Convolution(data = data,
# weight = model$arg.params$conv1_weight,
# bias = arg.params["conv1_bias"],
kernel = c(3, 3),
pad = c(2, 2),
num_filter = 16,
name = "conv1")
relu1 = mx.symbol.Activation(data = conv1,
act_type = "relu",
name = "relu1")
pool1 = mx.symbol.Pooling(data = relu1,
pool_type = "max",
kernel = c(2,2),
stride=c(2,2),
name = "pool1")
# 2nd convolutional layer
conv2 = mx.symbol.Convolution(data = pool1,
# weight = arg.params["conv2_weight"],
# bias = arg.params["conv2_bias"],
kernel = c(3, 3),
pad = c(1, 1),
num_filter = 4,
name = "conv2")
relu2 = mx.symbol.Activation(data = conv2,
act_type = "relu",
name = "relu2")
pool2 = mx.symbol.Pooling(data = relu2,
pool_type = "max",
kernel = c(2,2),
stride=c(2,2),
name = "pool2")
# Flatten the pooled feature maps into one latent vector per sample.
flatten = mx.symbol.flatten(data = pool2)
# transpose = mx.symbol.transpose(data = flatten)
# Wrap the truncated symbol graph + trained encoder weights as a model
# object so predict() can be reused for the forward pass.
encoder_model = list(symbol = flatten, arg.params = arg.params, aux.params = list())
class(encoder_model) = "MXFeedForwardModel"
output <- predict(encoder_model, X=input) #, array.layout = "colmajor")
return(output)
}
# Display the input raster at `index` and print its latent representation.
#
# @param data 4-D array of input rasters (H x W x 1 x N) -- assumed to match
#   the layout of Xarray/Xtest in this script; TODO confirm with callers.
# @param result array of encoded representations, indexed by sample on the
#   4th dimension (matching the original `[,,,i]` access).
# @param index integer. Which sample to display.
#
# Fix: the original body ignored its own parameters -- it read the GLOBAL
# `Xtest` instead of `data`, and the free variable `i` (a leftover loop
# index) instead of `index`.
latent_vector <- function(data, result, index)
{
  image(data[, , 1, index], useRaster = TRUE, axes = FALSE)
  print(result[, , , index])
}
predicted = encode(Xarray, model)
print(dim(predicted))
# t-SNE analysis
library(Rtsne)
library("ggplot2")
library("ggimage")
par(mfrow=c(1,1))
dev.off()
tsne = Rtsne(t(predicted), check_duplicates = FALSE, theta=0.0)
for(i in 1:nRasters) {
writePNG(1-Xarray[,,1,i]/255., target=sprintf('./land_test_images/land_%04d.png', i))
}
img = list.files(path="./land_test_images", pattern="*.png")
img = paste("./land_test_images/", img, sep="")
d = data.frame(x=tsne$Y[,1], y=tsne$Y[,2], image=img)
p = ggplot(d, aes(x, y)) + geom_image(aes(image=image), size=.05)
print(p) | /test_CAE_model.R | no_license | ChrisBotella/birds_and_landscape_patterns | R | false | false | 10,562 | r | # created on 20/03/20
# load packages
require(mxnet)
require(raster)
require(ggplot2)
.pardefault <- par()
dataDir = "/home/leguilln/workspace/birds_and_landscape/birds_and_landscape_patterns/data"
dataPath = paste(dataDir, 'data_birds_landscape_CNN.RData', sep="/")
load(dataPath)
setwd(dataDir)
modelName = "test_cae_best_4"
train = FALSE
loadModel = "test_cae_best_4" #NULL
iteration = 0
n_it = 5000
newBirdsArray = array( unlist(birds_array[1,,]) , dim=dim(birds_array[1,,]) )
dimnames(newBirdsArray) = dimnames(birds_array)[2:3]
inY = colnames(newBirdsArray)
inX = as.character(dimnames(raster_array)[3][[1]])
inXandY = intersect(inX,inY)
inXandYnotNa = setdiff(inXandY,raster_with_na)
Xarray= raster_array[,,dimnames(raster_array)[3][[1]]%in%inXandYnotNa]
dim(Xarray) = c(dim(Xarray)[1],dim(Xarray)[2],1,dim(Xarray)[3])
MainDevice = mx.cpu()
# create train, test and validation data sets
nRasters = dim(Xarray)[4]
nValid = 500
nTest = 100
bag = 1:nRasters
trainSample = sample(bag, nRasters - nValid - nTest)
validSample = sample( setdiff(bag,trainSample) , nValid )
testSample = setdiff( setdiff(bag,trainSample) , validSample)
Xtrain = Xarray[,,,trainSample,drop=F]
Xvalid = Xarray[,,,validSample,drop=F]
Xtest = Xarray[,,,testSample,drop=F]
# save(Xtrain, Xvalid, Xtest, file = "train_valid_test.RData")
load("train_valid_test.RData")
### define the convolutional autoencoder architectur e
data = mx.symbol.Variable(name = "data")
label = mx.symbol.Variable(name = "label")
## the encoder part of the network is made of 2 max-pooling convolutional layers
# 1st convolutional layer
conv1 = mx.symbol.Convolution(data = data,
kernel = c(3, 3),
pad = c(2, 2),
num_filter = 16,
name = "conv1")
relu1 = mx.symbol.Activation(data = conv1,
act_type = "relu",
name = "relu1")
pool1 = mx.symbol.Pooling(data = relu1,
pool_type = "max",
kernel = c(2,2),
stride=c(2,2),
name = "pool1")
# 2nd convolutional layer
conv2 = mx.symbol.Convolution(data = pool1,
kernel = c(3, 3),
pad = c(1, 1),
num_filter = 4,
name = "conv2")
relu2 = mx.symbol.Activation(data = conv2,
act_type = "relu",
name = "relu2")
pool2 = mx.symbol.Pooling(data = relu2,
pool_type = "max",
kernel = c(2,2),
stride=c(2,2),
name = "pool2")
## the decoder part of the network is made of 2 transposed convolutional layers
# 1st transposed convolutional layer
deconv1 = mx.symbol.Deconvolution(data = pool2,
kernel = c(2, 2),
stride = c(2, 2),
num_filter = 16,
layout='NCHW',
name = "deconv1")
# 2nd transposed convolutional layer
deconv2 = mx.symbol.Deconvolution(data = deconv1,
kernel = c(2, 2),
stride = c(2, 2),
num_filter = 1,
layout='NCHW',
name = "deconv2")
# mean squared error
loss <- mx.symbol.LinearRegressionOutput(deconv2, name = "loss")
### mxnet utilities
mx.callback.early.stop.and.save.checkpoint <- function(train.metric = NULL, eval.metric = NULL, bad.steps = NULL, maximize = FALSE, verbose = FALSE, prefix = "") {
function (iteration, nbatch, env, verbose = verbose)
{
if (!is.null(env$metric)) {
if (!is.null(train.metric)) {
result <- env$metric$get(env$train.metric)
if ((!maximize && result$value < train.metric) ||
(maximize && result$value > train.metric)) {
return(FALSE)
}
}
if (!is.null(eval.metric)) {
if (!is.null(env$eval.metric)) {
result <- env$metric$get(env$eval.metric)
if ((!maximize && result$value < eval.metric) ||
(maximize && result$value > eval.metric)) {
return(FALSE)
}
}
}
}
if (!is.null(bad.steps)) {
if (iteration == 1) {
mx.best.iter <<- 1
if (maximize) {
mx.best.score <<- 0
}
else {
mx.best.score <<- Inf
}
}
if (!is.null(env$eval.metric)) {
result <- env$metric$get(env$eval.metric)
if ((!maximize && result$value > mx.best.score) ||
(maximize && result$value < mx.best.score)) {
if (mx.best.iter == bad.steps) {
if (verbose) {
message("Best score=", mx.best.score, ", iteration [",
iteration - bad.steps, "]")
}
return(FALSE)
}
else {
mx.best.iter <<- mx.best.iter + 1
}
}
else {
mx.best.score <<- result$value
mx.best.iter <<- 1
mx.model.save(env$model, prefix, 0)
cat(sprintf("Model checkpoint saved to %s-0.params\n", prefix))
}
}
}
return(TRUE)
}
}
### learn and save model
# Training parameters
bad.steps = 50
batch.size = 32
saveDir = dataDir
prefix = paste(saveDir, modelName, sep="/")
if(!is.null(loadModel)) {
# OR load pre-trained model
setwd(saveDir)
model = mx.model.load(modelName, iteration = 0)
}
if(train == TRUE) {
if(is.null(loadModel)) {
# Randomly initialize the model weights
mx.set.seed(2019)
model = mx.model.FeedForward.create(symbol=loss,
X=Xtrain,
y=Xtrain,
eval.data=list(data=Xvalid, label=Xvalid),
ctx=MainDevice,
begin.round=1,
num.round=n_it,
array.batch.size=batch.size,
optimizer="adagrad",
initializer=mx.init.Xavier(),
eval.metric=mx.metric.mse,
epoch.end.callback=mx.callback.early.stop.and.save.checkpoint(bad.steps=bad.steps, prefix=prefix, verbose=TRUE)
)
} else {
#continue training
mx.best.iter <<- 0
model = mx.model.FeedForward.create(model$symbol,
X=Xtrain,
eval.data=list(data=Xvalid, label=Xvalid),
ctx=MainDevice,
begin.round=iteration,
num.round=n_it,
array.batch.size=batch.size,
optimizer="adagrad",
eval.metric=mx.metric.mse,
arg.params=model$arg.params,
aux.params=model$aux.params,
epoch.end.callback=mx.callback.early.stop.and.save.checkpoint(bad.steps=bad.steps, prefix=prefix, verbose=TRUE)
)
}
}
predicted = predict(model, X=Xtest)
par(mfrow=c(3,2))
for(i in 1:3) {
image(Xtest[,,1,i], useRaster=TRUE, axes=FALSE)
image(predicted[,,1,i], useRaster=TRUE, axes=FALSE)
}
encode <- function(input, model)
{
arg.params = model$arg.params[c("conv1_weight", "conv1_bias", "conv2_weight", "conv2_bias")]
data = mx.symbol.Variable("data")
# 1st convolutional layer
conv1 = mx.symbol.Convolution(data = data,
# weight = model$arg.params$conv1_weight,
# bias = arg.params["conv1_bias"],
kernel = c(3, 3),
pad = c(2, 2),
num_filter = 16,
name = "conv1")
relu1 = mx.symbol.Activation(data = conv1,
act_type = "relu",
name = "relu1")
pool1 = mx.symbol.Pooling(data = relu1,
pool_type = "max",
kernel = c(2,2),
stride=c(2,2),
name = "pool1")
# 2nd convolutional layer
conv2 = mx.symbol.Convolution(data = pool1,
# weight = arg.params["conv2_weight"],
# bias = arg.params["conv2_bias"],
kernel = c(3, 3),
pad = c(1, 1),
num_filter = 4,
name = "conv2")
relu2 = mx.symbol.Activation(data = conv2,
act_type = "relu",
name = "relu2")
pool2 = mx.symbol.Pooling(data = relu2,
pool_type = "max",
kernel = c(2,2),
stride=c(2,2),
name = "pool2")
flatten = mx.symbol.flatten(data = pool2)
# transpose = mx.symbol.transpose(data = flatten)
encoder_model = list(symbol = flatten, arg.params = arg.params, aux.params = list())
class(encoder_model) = "MXFeedForwardModel"
output <- predict(encoder_model, X=input) #, array.layout = "colmajor")
return(output)
}
latent_vector <- function(data, result, index)
{
image(Xtest[,,1,index], useRaster=TRUE, axes=FALSE)
print(result[,,,i])
}
predicted = encode(Xarray, model)
print(dim(predicted))
# t-SNE analysis
library(Rtsne)
library("ggplot2")
library("ggimage")
par(mfrow=c(1,1))
dev.off()
tsne = Rtsne(t(predicted), check_duplicates = FALSE, theta=0.0)
for(i in 1:nRasters) {
writePNG(1-Xarray[,,1,i]/255., target=sprintf('./land_test_images/land_%04d.png', i))
}
img = list.files(path="./land_test_images", pattern="*.png")
img = paste("./land_test_images/", img, sep="")
d = data.frame(x=tsne$Y[,1], y=tsne$Y[,2], image=img)
p = ggplot(d, aes(x, y)) + geom_image(aes(image=image), size=.05)
print(p) |
library(staTools)
### Name: MPE
### Title: Mean Percentage Error
### Aliases: MPE
### Keywords: error mean percentage
### ** Examples
x = runif(10)
y = runif(10)
MPE(x,y)
| /data/genthat_extracted_code/staTools/examples/MPE.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 178 | r | library(staTools)
### Name: MPE
### Title: Mean Percentage Error
### Aliases: MPE
### Keywords: error mean percentage
### ** Examples
x = runif(10)
y = runif(10)
MPE(x,y)
|
testlist <- list(rates = numeric(0), thresholds = numeric(0), x = c(1.390501319012e-309, -5.48545438509459e+303, -6.5929223516236e+303, 1.1862348434052e+194, 2.85270993200814e+48, 4.79592282367135e-310, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
result <- do.call(grattan::IncomeTax,testlist)
str(result) | /grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610125495-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 411 | r | testlist <- list(rates = numeric(0), thresholds = numeric(0), x = c(1.390501319012e-309, -5.48545438509459e+303, -6.5929223516236e+303, 1.1862348434052e+194, 2.85270993200814e+48, 4.79592282367135e-310, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
result <- do.call(grattan::IncomeTax,testlist)
str(result) |
setwd("/sdata/images/projects/GENSCOT/1/andrew/painMDD/")
source("scripts/include/run_analysis.R")
dataframe$zlikert_total<-scale(dataframe$likert_total)
dataframe$zeysenck_N <- scale(dataframe$eysenck_N)
traits<-c("zlikert_total","gfactor","zeysenck_N")
scores <-c("bdldscore","sczldscore","mddldscore")
sink(paste0(locations$results,"/pgrs-gs/pgcpgrs_into_psychADJ",today,".txt"),append=FALSE)
cat("PGC polygenic prediction of psychol vars in GS\n")
sink()
# For every (trait, polygenic score) pair: fit a pedigree-adjusted mixed
# model with asreml (fixed covariates: sex, age, age^2, depression status,
# 4 ancestry PCs, plus the score), append the fixed-effect estimates and
# Wald tests to the sink file, and report the variance in the trait
# explained by the score (var of score*beta over var of trait).
# NOTE(review): sink() is opened inside the loop without on.exit(), so an
# asreml error mid-iteration would leave the sink open.
for(i in 1:(length(traits))){
for(j in 1:length(scores)){
asr.formula <- paste("fixed=",traits[i]," ~sex + age + I(age^2) + dep_status + C1+C2+C3+C4 +", scores[j])
asr.model<- asreml(fixed=as.formula(asr.formula)
,random= ~ped(id, var=T, init=1)
,ginverse=list(id=ainv)
,data=dataframe
,maxiter=15000,trace=FALSE,na.method.X="omit",workspace=8e+08)
sink(paste0(locations$results,"/pgrs-gs/pgcpgrs_into_psychADJ",today,".txt"),append=TRUE)
cat("\n")
print(asr.model$fixed.formula)
print(summary(asr.model, all=T)$coef.fixed)
# First fixed-effect coefficient is taken as the LDpred score's beta --
# assumes the score term is listed first in coef.fixed; TODO confirm.
beta_ldpred <- summary(asr.model, all=T)$coef.fixed[1,1]
print(paste0("beta for ldpred = ", beta_ldpred))
print(wald.asreml(asr.model, ssType="conditional", denDF="numeric"))
#calculate predicted phenotype value
dataframe$temp <- dataframe[,scores[j]]*beta_ldpred
var_explained <- var(dataframe$temp, na.rm=TRUE)
var_total <- var(dataframe[,traits[i]], na.rm=TRUE)
r2<- var_explained/var_total
print(paste0("variance explained by ",scores[j], " in ",traits[i]," = ",r2))
# Clear the per-iteration scratch variables before the next pair.
r2 <- var_explained <- var_total <- NULL
cat("\n")
sink()
}
}
# Second sensitivity analysis: same trait x score models, but adjusting for
# total pain/distress score (likert_total) instead of depression status.
# Results go to a separate "...ADJdistress" file, overwritten here.
sink(paste0(locations$results,"/pgrs-gs/pgcpgrs_into_psychADJdistress",today,".txt"),append=FALSE)
cat("PGC polygenic prediction of psychol vars in GS\n")
sink()
for(i in 1:(length(traits))){
for(j in 1:length(scores)){
# Fixed effects: sex, age, age^2, likert_total, 4 ancestry PCs, plus the score.
asr.formula <- paste("fixed=",traits[i]," ~sex + age + I(age^2) + likert_total + C1+C2+C3+C4 +", scores[j])
asr.model<- asreml(fixed=as.formula(asr.formula)
,random= ~ped(id, var=T, init=1)
,ginverse=list(id=ainv)
,data=dataframe
,maxiter=15000,trace=FALSE,na.method.X="omit",workspace=8e+08)
# Append this model's output to the distress-adjusted results file.
sink(paste0(locations$results,"/pgrs-gs/pgcpgrs_into_psychADJdistress",today,".txt"),append=TRUE)
cat("\n")
print(asr.model$fixed.formula)
print(summary(asr.model, all=T)$coef.fixed)
# NOTE(review): assumes coef.fixed row 1 is the polygenic-score term -- confirm.
beta_ldpred <- summary(asr.model, all=T)$coef.fixed[1,1]
print(paste0("beta for ldpred = ", beta_ldpred))
print(wald.asreml(asr.model, ssType="conditional", denDF="numeric"))
#calculate predicted phenotype value
dataframe$temp <- dataframe[,scores[j]]*beta_ldpred
var_explained <- var(dataframe$temp, na.rm=TRUE)
var_total <- var(dataframe[,traits[i]], na.rm=TRUE)
r2<- var_explained/var_total
print(paste0("variance explained by ",scores[j], " in ",traits[i]," = ",r2))
r2 <- var_explained <- var_total <- NULL
cat("\n")
sink()
}
}
| /scripts/pgrs_asreml_gs/pgcpgrs-into-depressionRelatedAdjusted.R | permissive | mcintosh2001/mddPain | R | false | false | 3,122 | r | setwd("/sdata/images/projects/GENSCOT/1/andrew/painMDD/")
# Shared setup: `dataframe`, the pedigree inverse `ainv`, `locations`, `today`
# and the asreml session are expected to be created by this include script.
source("scripts/include/run_analysis.R")
# Z-scale the continuous phenotypes so betas are on a standardized scale.
dataframe$zlikert_total<-scale(dataframe$likert_total)
dataframe$zeysenck_N <- scale(dataframe$eysenck_N)
# Outcomes (psychological variables) and PGC polygenic scores to test;
# every trait x score combination is modelled below.
traits<-c("zlikert_total","gfactor","zeysenck_N")
scores <-c("bdldscore","sczldscore","mddldscore")
# Start the results file fresh with a single header line.
sink(paste0(locations$results,"/pgrs-gs/pgcpgrs_into_psychADJ",today,".txt"),append=FALSE)
cat("PGC polygenic prediction of psychol vars in GS\n")
sink()
# Model loop 1: each trait ~ sex + age + age^2 + depression status + 4 ancestry
# PCs + polygenic score, with a pedigree random effect; results appended to
# the "...psychADJ" file.
for(i in 1:(length(traits))){
for(j in 1:length(scores)){
asr.formula <- paste("fixed=",traits[i]," ~sex + age + I(age^2) + dep_status + C1+C2+C3+C4 +", scores[j])
asr.model<- asreml(fixed=as.formula(asr.formula)
,random= ~ped(id, var=T, init=1)
,ginverse=list(id=ainv)
,data=dataframe
,maxiter=15000,trace=FALSE,na.method.X="omit",workspace=8e+08)
sink(paste0(locations$results,"/pgrs-gs/pgcpgrs_into_psychADJ",today,".txt"),append=TRUE)
cat("\n")
print(asr.model$fixed.formula)
print(summary(asr.model, all=T)$coef.fixed)
# NOTE(review): assumes coef.fixed row 1 is the polygenic-score term -- confirm.
beta_ldpred <- summary(asr.model, all=T)$coef.fixed[1,1]
print(paste0("beta for ldpred = ", beta_ldpred))
print(wald.asreml(asr.model, ssType="conditional", denDF="numeric"))
#calculate predicted phenotype value
dataframe$temp <- dataframe[,scores[j]]*beta_ldpred
# Approximate variance explained: var(beta * score) / var(trait).
var_explained <- var(dataframe$temp, na.rm=TRUE)
var_total <- var(dataframe[,traits[i]], na.rm=TRUE)
r2<- var_explained/var_total
print(paste0("variance explained by ",scores[j], " in ",traits[i]," = ",r2))
r2 <- var_explained <- var_total <- NULL
cat("\n")
sink()
}
}
# Model loop 2: same models, adjusting for total pain/distress score
# (likert_total) instead of depression status; separate output file.
sink(paste0(locations$results,"/pgrs-gs/pgcpgrs_into_psychADJdistress",today,".txt"),append=FALSE)
cat("PGC polygenic prediction of psychol vars in GS\n")
sink()
for(i in 1:(length(traits))){
for(j in 1:length(scores)){
asr.formula <- paste("fixed=",traits[i]," ~sex + age + I(age^2) + likert_total + C1+C2+C3+C4 +", scores[j])
asr.model<- asreml(fixed=as.formula(asr.formula)
,random= ~ped(id, var=T, init=1)
,ginverse=list(id=ainv)
,data=dataframe
,maxiter=15000,trace=FALSE,na.method.X="omit",workspace=8e+08)
sink(paste0(locations$results,"/pgrs-gs/pgcpgrs_into_psychADJdistress",today,".txt"),append=TRUE)
cat("\n")
print(asr.model$fixed.formula)
print(summary(asr.model, all=T)$coef.fixed)
# NOTE(review): assumes coef.fixed row 1 is the polygenic-score term -- confirm.
beta_ldpred <- summary(asr.model, all=T)$coef.fixed[1,1]
print(paste0("beta for ldpred = ", beta_ldpred))
print(wald.asreml(asr.model, ssType="conditional", denDF="numeric"))
#calculate predicted phenotype value
dataframe$temp <- dataframe[,scores[j]]*beta_ldpred
var_explained <- var(dataframe$temp, na.rm=TRUE)
var_total <- var(dataframe[,traits[i]], na.rm=TRUE)
r2<- var_explained/var_total
print(paste0("variance explained by ",scores[j], " in ",traits[i]," = ",r2))
r2 <- var_explained <- var_total <- NULL
cat("\n")
sink()
}
}
|
# Parallel grid search over k for k-nearest-neighbours classification of the
# Kaggle Covertype data, using a snow/doSNOW SOCK cluster.
library(foreach)
library(snow)
library(doSNOW)
# NOTE(review): randomForest is loaded but never used in this script.
library(randomForest)
library(class)
library(data.table)
# Import and prepare data
data <- read.csv("Kaggle_Covertype_training.csv", header = TRUE, sep = ",")
data1 <- data
# Perform feature construction
# EVDtH_1 / EHDtH_2: elevation adjusted by the vertical / weighted horizontal
# distance to hydrology.
data1 <- cbind(data1, EVDtH_1 = data1$elevation-data1$ver_dist_hyd)
data1 <- cbind(data1, EHDtH_2 = data1$elevation-(data1$hor_dist_hyd*0.2))
# Reorder so the class label (originally column 56) ends up last, after the
# two new features (57, 58).
data1 <- data1[,c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,
31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,57,58,56)]
set.seed(1234)
# Sampling all 40000 indices yields a permutation: rows 1..40000 (shuffled)
# are the training set; any rows beyond 40000 become the test set below.
# NOTE(review): `subset` shadows base::subset for the rest of this script.
subset <- sample(seq(1,40000,1),40000) # random subsetting of training data
xTrain <- data1[subset,2:(ncol(data1)-1)]
xTrain <- sapply(xTrain,as.numeric)
# Column 58 holds the class label after the reordering above.
yTrain <- data1[subset,58]
yTrain <- factor(yTrain)
xTest <- data1[-subset,2:(ncol(data1)-1)]
xTest <- sapply(xTest,as.numeric)
yTest <- data1[-subset,58]
yTest <- factor(yTest)
##################################################
# Spin up a 3-worker SOCK cluster; each foreach iteration fits one value of k.
noCores <- 3
cl <- makeCluster(noCores, type="SOCK", outfile="")
registerDoSNOW(cl)
# Candidate neighbourhood sizes.
kOpt <- c(1,3,5,7,9,11,13,15,17,19,23,27,29,31,37,41,43,47,51)
# NOTE(review): the name resultsRF looks like a leftover from a random-forest
# script -- it actually holds kNN error rates.
resultsRF <- foreach(k = kOpt,
.combine = rbind, .packages = "class") %dopar% {
cat("k = ", k, fill = TRUE)
# kNN results
predictedClasses <- knn(train=xTrain, test=xTest, cl=yTrain, k=k)
# Align factor levels so the misclassification comparison is well-defined.
predictedClasses <- factor(predictedClasses, levels=c(1,2,3,4,5,6,7))
yTest <- factor(yTest, levels=c(1,2,3,4,5,6,7))
predictionError <- mean(predictedClasses != yTest)
# Last expression is the value handed to .combine: one row of (k, error).
result <- c(k, predictionError)
}
# puts results in a data table
colnames(resultsRF) <- c("k", "Error")
results2 <- data.table(resultsRF)
results2 <- results2[with(results2, order(k)), ]
# save results in .csv file
write.csv(results2,"knn_testing.csv", row.names=F)
stopCluster(cl) | /kNN-ThyNearestNeighbours.R | no_license | SteveCarmody/ThyNearestNeighbours | R | false | false | 2,115 | r | library(foreach)
# Parallel kNN grid search over k (Kaggle Covertype data) on a snow/doSNOW
# SOCK cluster. (library(foreach) is loaded just above this block.)
library(snow)
library(doSNOW)
# NOTE(review): randomForest is loaded but never used in this script.
library(randomForest)
library(class)
library(data.table)
# Import and prepare data
data <- read.csv("Kaggle_Covertype_training.csv", header = TRUE, sep = ",")
data1 <- data
# Perform feature construction
data1 <- cbind(data1, EVDtH_1 = data1$elevation-data1$ver_dist_hyd)
data1 <- cbind(data1, EHDtH_2 = data1$elevation-(data1$hor_dist_hyd*0.2))
# Reorder so the class label (column 56) ends up last, after the new features.
data1 <- data1[,c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,
31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,57,58,56)]
set.seed(1234)
# Permutation of rows 1..40000 = training set; rows beyond 40000 = test set.
# NOTE(review): `subset` shadows base::subset for the rest of this script.
subset <- sample(seq(1,40000,1),40000) # random subsetting of training data
xTrain <- data1[subset,2:(ncol(data1)-1)]
xTrain <- sapply(xTrain,as.numeric)
yTrain <- data1[subset,58]
yTrain <- factor(yTrain)
xTest <- data1[-subset,2:(ncol(data1)-1)]
xTest <- sapply(xTest,as.numeric)
yTest <- data1[-subset,58]
yTest <- factor(yTest)
##################################################
noCores <- 3
cl <- makeCluster(noCores, type="SOCK", outfile="")
registerDoSNOW(cl)
kOpt <- c(1,3,5,7,9,11,13,15,17,19,23,27,29,31,37,41,43,47,51)
# NOTE(review): resultsRF actually holds kNN error rates, not random-forest output.
resultsRF <- foreach(k = kOpt,
.combine = rbind, .packages = "class") %dopar% {
cat("k = ", k, fill = TRUE)
# kNN results
predictedClasses <- knn(train=xTrain, test=xTest, cl=yTrain, k=k)
predictedClasses <- factor(predictedClasses, levels=c(1,2,3,4,5,6,7))
yTest <- factor(yTest, levels=c(1,2,3,4,5,6,7))
predictionError <- mean(predictedClasses != yTest)
result <- c(k, predictionError)
}
# puts results in a data table
colnames(resultsRF) <- c("k", "Error")
results2 <- data.table(resultsRF)
results2 <- results2[with(results2, order(k)), ]
# save results in .csv file
write.csv(results2,"knn_testing.csv", row.names=F)
stopCluster(cl) |
## This R file is to prepare tidy data for the Samsung data analysis.
## The default directory of this R script is the Samsung data folder path
## on your working directory. (Assumption: the person running the program has
## downloaded and extracted the data file in the R studio's working directory.)
## i.e. ~/UCI HAR Dataset
## Data source: https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
## Change working directory
## NOTE(review): setwd() in a script is fragile -- it assumes the dataset was
## extracted directly under the user's home directory.
setwd("~/UCI HAR Dataset")
## load the necessary library
library(dplyr)
library(tidyr)
#### Merges the training and the test sets to create one data set "mergedDataSet"
#[a] Read FEATURE (Type) names info, assign new variable name for the data frame
features = read.table("features.txt")
names(features) <- c("featureID", "featureName")
#[b] Read ACTIVITY TYPE info, assign new variable name for the data frame
activity <- read.table("activity_labels.txt")
names(activity) <- c("activityID", "activityName")
#[c] Read SUBJECT / VOLUNTEER's signal info and put them to temporary data frames
# , assign a variable name
# , add signal ID column
# , and merge SUBJECT's signal info into a data frame
# (Note: the signal ID is same as the row number; train rows are numbered
#  after the test rows so IDs stay unique across the merged set)
subject_test <- read.table("test/subject_test.txt")
subject_train <- read.table("train/subject_train.txt")
names(subject_test) <- "subjectID"
names(subject_train) <- "subjectID"
subject_test$signalID <- 1:nrow(subject_test)
subject_train$signalID <- (nrow(subject_test)+1) : (nrow(subject_test)+nrow(subject_train))
subject <- merge(subject_test, subject_train, all = TRUE, sort=FALSE)
rm("subject_test")
rm("subject_train")
#[d] Read ACTIVITY's signal info and put them to temporary data frames
# , assign a variable name
# , add signal ID column
# , add activityName column by [b]
# , and merge ACTIVITY's signal info into a data frame
# (Note: the signal ID is same as the row number)
y_test <- read.table("test/y_test.txt")
y_train <- read.table("train/y_train.txt")
names(y_test) <- "activityID"
names(y_train) <- "activityID"
y_test$signalID <- 1:nrow(y_test)
y_train$signalID <- (nrow(y_test)+1) : (nrow(y_test)+nrow(y_train))
y <- merge(y_test[c(2,1)], y_train[c(2,1)], all=TRUE, sort=FALSE)
y <- merge(activity, y, by="activityID")
rm("y_test")
rm("y_train")
#[e] Read FEATURE's signal info and put them to temporary data frames
# , assign a variable name (set the feature name as the column name)
# , add signal ID column (Note: the signal ID is same as the row number)
# , add dataSource column (the value is either test or train, the sub-folder name of the original data source)
# ,and merge FEATURE's signal info into a new data frame
X_test <- read.table("test/X_test.txt")
X_train <- read.table("train/X_train.txt")
names(X_test) <- features[,2]
names(X_train) <- features[,2]
X_test$signalID <- 1:nrow(X_test)
X_train$signalID <- (nrow(X_test)+1) : (nrow(X_test)+nrow(X_train))
X_test$dataSource <- "test"
X_train$dataSource <- "train"
X <- merge(X_test[c(562, 1:561, 563)], X_train[c(562, 1:561, 563)], all=TRUE, sort=FALSE)
rm("X_test")
rm("X_train")
#[f] Select those necessary columns from [e] by matching the key words of the column name
# that included the data key signalID and those column related to
# measurements on the mean and standard deviation.
# NOTE(review): "mean()" and "std()" contain an empty regex group, so the
# pattern effectively matches any name containing "mean" or "std"
# (e.g. meanFreq is included too) -- confirm that is intended.
selCol <- grep("signalID|dataSource|mean()|std()", colnames(X), ignore.case=FALSE)
#[g] Create new data set "mergedDataSet"
mergedDataSet <- select(X, selCol)
#[h] Add activityName to data set "mergedDataSet"
mergedDataSet <- merge(mergedDataSet, y[2:3], by="signalID")
#[i] Add subject to data set "mergedDataSet"
mergedDataSet <- merge(mergedDataSet, subject, by="signalID")
#[j] Reorder the data set column for easy reading
# (Note: before re-ordering,
# column 1 is signalID,
# column 2 - 80 are the feature/measurement results
# Column 81 - 83 (the last 3 columns) are dataSource, activityName, subjectID
# After re-ordering, the sequence of column will change to
# signalID, dataSource, activityName, subjectID, and then following with feature results)
numOfMeasurement <- (ncol(mergedDataSet) - 3 + 1)
mergedDataSet <- select(mergedDataSet
, 1
, numOfMeasurement: ncol(mergedDataSet)
, 2:numOfMeasurement)
#### Select 15 variables from tidy data set [j], compute the average of each
#### variable for each activity and each subject, and export the result to
#### "run_analysis_result.txt" in the working directory.
#[k] Select the 15 analysis variables (activity, subject + 12 tBodyAcc /
#    tGravityAcc mean/std measurements) into a separate data set.
variableSet <- select(mergedDataSet, 3:16)
#[l] Rename the variables for easier programming. The names are kept exactly
#    as before (including the Mean/mean capitalisation) so the exported column
#    names do not change.
names(variableSet) <- c("activityName","subjectID"
,"tBodyAcc_Mean_X", "tBodyAcc_Mean_Y", "tBodyAcc_Mean_Z"
,"tBodyAcc_std_X", "tBodyAcc_std_Y", "tBodyAcc_std_Z"
,"tGravityAcc_mean_X","tGravityAcc_mean_Y","tGravityAcc_mean_Z"
,"tGravityAcc_std_X", "tGravityAcc_std_Y", "tGravityAcc_std_Z")
#[m] Average each selected variable per (activity, subject) group.
#    mean(x) replaces the original hand-rolled sum(x)/n() -- same intent (NAs
#    still propagate since na.rm is not set), clearer and numerically more
#    robust. The redundant inner select() was dropped: variableSet already
#    contains exactly these columns in this order.
analysis_result <- variableSet %>%
group_by(activityName, subjectID) %>%
summarise(
count=n()
, avg_tBodyAcc_Mean_X = mean(tBodyAcc_Mean_X)
, avg_tBodyAcc_Mean_Y = mean(tBodyAcc_Mean_Y)
, avg_tBodyAcc_Mean_Z = mean(tBodyAcc_Mean_Z)
, avg_tBodyAcc_std_X = mean(tBodyAcc_std_X)
, avg_tBodyAcc_std_Y = mean(tBodyAcc_std_Y)
, avg_tBodyAcc_std_Z = mean(tBodyAcc_std_Z)
, avg_tGravityAcc_mean_X = mean(tGravityAcc_mean_X)
, avg_tGravityAcc_mean_Y = mean(tGravityAcc_mean_Y)
, avg_tGravityAcc_mean_Z = mean(tGravityAcc_mean_Z)
, avg_tGravityAcc_std_X = mean(tGravityAcc_std_X)
, avg_tGravityAcc_std_Y = mean(tGravityAcc_std_Y)
, avg_tGravityAcc_std_Z = mean(tGravityAcc_std_Z)
)
#[n] Write the result to run_analysis_result.txt and store in the working directory.
write.table(analysis_result, file="run_analysis_result.txt", row.names=FALSE)
| /run_analysis.R | no_license | lolitta/GettingAndCleaningDataProject | R | false | false | 6,469 | r | ## This R file is to prepare tidy data for the sumsung data analysis.
## The default directory of this R script is the Samsung data folder path
## on your working directory. (Assumption: the person running the program has
## downloaded and extracted the data file in the R studio's working directory.)
## i.e. ~/UCI HAR Dataset
## Data source: https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
## Change working directory
## NOTE(review): setwd() in a script is fragile -- it assumes the dataset was
## extracted directly under the user's home directory.
setwd("~/UCI HAR Dataset")
## load the necessary library
library(dplyr)
library(tidyr)
#### Merges the training and the test sets to create one data set "mergedDataSet"
#[a] Read FEATURE (Type) names info, assign new variable name for the data frame
features = read.table("features.txt")
names(features) <- c("featureID", "featureName")
#[b] Read ACTIVITY TYPE info, assign new variable name for the data frame
activity <- read.table("activity_labels.txt")
names(activity) <- c("activityID", "activityName")
#[c] Read SUBJECT / VOLUNTEER's signal info and put them to temporary data frames
# , assign a variable name
# , add signal ID column
# , and merge SUBJECT's signal info into a data frame
# (Note: the signal ID is same as the row number; train rows are numbered
#  after the test rows so IDs stay unique across the merged set)
subject_test <- read.table("test/subject_test.txt")
subject_train <- read.table("train/subject_train.txt")
names(subject_test) <- "subjectID"
names(subject_train) <- "subjectID"
subject_test$signalID <- 1:nrow(subject_test)
subject_train$signalID <- (nrow(subject_test)+1) : (nrow(subject_test)+nrow(subject_train))
subject <- merge(subject_test, subject_train, all = TRUE, sort=FALSE)
rm("subject_test")
rm("subject_train")
#[d] Read ACTIVITY's signal info and put them to temporary data frames
# , assign a variable name
# , add signal ID column
# , add activityName column by [b]
# , and merge ACTIVITY's signal info into a data frame
# (Note: the signal ID is same as the row number)
y_test <- read.table("test/y_test.txt")
y_train <- read.table("train/y_train.txt")
names(y_test) <- "activityID"
names(y_train) <- "activityID"
y_test$signalID <- 1:nrow(y_test)
y_train$signalID <- (nrow(y_test)+1) : (nrow(y_test)+nrow(y_train))
y <- merge(y_test[c(2,1)], y_train[c(2,1)], all=TRUE, sort=FALSE)
y <- merge(activity, y, by="activityID")
rm("y_test")
rm("y_train")
#[e] Read FEATURE's signal info and put them to temporary data frames
# , assign a variable name (set the feature name as the column name)
# , add signal ID column (Note: the signal ID is same as the row number)
# , add dataSource column (the value is either test or train, the sub-folder name of the original data source)
# ,and merge FEATURE's signal info into a new data frame
X_test <- read.table("test/X_test.txt")
X_train <- read.table("train/X_train.txt")
names(X_test) <- features[,2]
names(X_train) <- features[,2]
X_test$signalID <- 1:nrow(X_test)
X_train$signalID <- (nrow(X_test)+1) : (nrow(X_test)+nrow(X_train))
X_test$dataSource <- "test"
X_train$dataSource <- "train"
X <- merge(X_test[c(562, 1:561, 563)], X_train[c(562, 1:561, 563)], all=TRUE, sort=FALSE)
rm("X_test")
rm("X_train")
#[f] Select those necessary columns from [e] by matching the key words of the column name
# that included the data key signalID and those column related to
# measurements on the mean and standard deviation.
# NOTE(review): "mean()" and "std()" contain an empty regex group, so the
# pattern effectively matches any name containing "mean" or "std"
# (e.g. meanFreq is included too) -- confirm that is intended.
selCol <- grep("signalID|dataSource|mean()|std()", colnames(X), ignore.case=FALSE)
#[g] Create new data set "mergedDataSet"
mergedDataSet <- select(X, selCol)
#[h] Add activityName to data set "mergedDataSet"
mergedDataSet <- merge(mergedDataSet, y[2:3], by="signalID")
#[i] Add subject to data set "mergedDataSet"
mergedDataSet <- merge(mergedDataSet, subject, by="signalID")
#[j] Reorder the data set column for easy reading
# (Note: before re-ordering,
# column 1 is signalID,
# column 2 - 80 are the feature/measurement results
# Column 81 - 83 (the last 3 columns) are dataSource, activityName, subjectID
# After re-ordering, the sequence of column will change to
# signalID, dataSource, activityName, subjectID, and then following with feature results)
numOfMeasurement <- (ncol(mergedDataSet) - 3 + 1)
mergedDataSet <- select(mergedDataSet
, 1
, numOfMeasurement: ncol(mergedDataSet)
, 2:numOfMeasurement)
#### Select 15 variables from tidy data set [j], compute the average of each
#### variable for each activity and each subject, and export the result to
#### "run_analysis_result.txt" in the working directory.
#[k] Select the 15 analysis variables (activity, subject + 12 tBodyAcc /
#    tGravityAcc mean/std measurements) into a separate data set.
variableSet <- select(mergedDataSet, 3:16)
#[l] Rename the variables for easier programming. The names are kept exactly
#    as before (including the Mean/mean capitalisation) so the exported column
#    names do not change.
names(variableSet) <- c("activityName","subjectID"
,"tBodyAcc_Mean_X", "tBodyAcc_Mean_Y", "tBodyAcc_Mean_Z"
,"tBodyAcc_std_X", "tBodyAcc_std_Y", "tBodyAcc_std_Z"
,"tGravityAcc_mean_X","tGravityAcc_mean_Y","tGravityAcc_mean_Z"
,"tGravityAcc_std_X", "tGravityAcc_std_Y", "tGravityAcc_std_Z")
#[m] Average each selected variable per (activity, subject) group.
#    mean(x) replaces the original hand-rolled sum(x)/n() -- same intent (NAs
#    still propagate since na.rm is not set), clearer and numerically more
#    robust. The redundant inner select() was dropped: variableSet already
#    contains exactly these columns in this order.
analysis_result <- variableSet %>%
group_by(activityName, subjectID) %>%
summarise(
count=n()
, avg_tBodyAcc_Mean_X = mean(tBodyAcc_Mean_X)
, avg_tBodyAcc_Mean_Y = mean(tBodyAcc_Mean_Y)
, avg_tBodyAcc_Mean_Z = mean(tBodyAcc_Mean_Z)
, avg_tBodyAcc_std_X = mean(tBodyAcc_std_X)
, avg_tBodyAcc_std_Y = mean(tBodyAcc_std_Y)
, avg_tBodyAcc_std_Z = mean(tBodyAcc_std_Z)
, avg_tGravityAcc_mean_X = mean(tGravityAcc_mean_X)
, avg_tGravityAcc_mean_Y = mean(tGravityAcc_mean_Y)
, avg_tGravityAcc_mean_Z = mean(tGravityAcc_mean_Z)
, avg_tGravityAcc_std_X = mean(tGravityAcc_std_X)
, avg_tGravityAcc_std_Y = mean(tGravityAcc_std_Y)
, avg_tGravityAcc_std_Z = mean(tGravityAcc_std_Z)
)
#[n] Write the result to run_analysis_result.txt and store in the working directory.
write.table(analysis_result, file="run_analysis_result.txt", row.names=FALSE)
|
# Start a wall-clock timer: record the current process time in the global
# variable `start_tic_toc_run_time`, which the matching toc() reads and then
# removes. Calling tic() again simply restarts the timer.
tic <- function() {
  assign("start_tic_toc_run_time", proc.time(), envir = .GlobalEnv)
}
# Stop the timer started by tic(): print the elapsed proc.time() difference
# and delete the global marker variable. Emits a warning (and times nothing)
# when tic() was never called.
toc <- function() {
  if (!exists("start_tic_toc_run_time")) {
    warning("tic must be executed before toc")
  } else {
    print(proc.time() - start_tic_toc_run_time)
    rm("start_tic_toc_run_time", envir = .GlobalEnv)
  }
}
| /R/tic_toc.R | no_license | isaacmichaud/IMisc | R | false | false | 291 | r | tic <- function() {
start_tic_toc_run_time <<- proc.time()
}
# Stop the timer started by tic(): print the elapsed proc.time() difference
# and delete the global marker variable. Emits a warning (and times nothing)
# when tic() was never called.
toc <- function() {
  if (!exists("start_tic_toc_run_time")) {
    warning("tic must be executed before toc")
  } else {
    print(proc.time() - start_tic_toc_run_time)
    rm("start_tic_toc_run_time", envir = .GlobalEnv)
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genesis.R
\name{genesis}
\alias{genesis}
\alias{phyllotactic_genesis}
\alias{predefined_genesis}
\alias{bigbang_genesis}
\alias{aquarium_genesis}
\alias{petridish_genesis}
\title{Particle initialisation}
\usage{
phyllotactic_genesis(radius = 10, angle = pi * (3 - sqrt(5)))
predefined_genesis(x, y, x_vel = 0, y_vel = 0)
bigbang_genesis(vel_min = 0, vel_max = 1)
aquarium_genesis(width = 10, height = 10, vel_min = 0, vel_max = 1)
petridish_genesis(max_radius = 10, vel_min = 0, vel_max = 1)
}
\arguments{
\item{radius}{The radius modifier (will be multiplied by the square root of the index of the particle)}
\item{angle}{The angular difference between two adjacent particles}
\item{x, y}{The columns holding (or value of) the position coordinates}
\item{x_vel, y_vel}{The columns holding (or value of) the velocity verlets}
\item{vel_min, vel_max}{The bounds of the uniformly distributed velocities}
\item{width, height}{The size of the rectangle holding the particles}
\item{max_radius}{The size of the disc.}
}
\value{
A function that takes the particle graph and returns a list with a
position and velocity element, each holding a matrix with two columns and a
row for each particle giving the x and y position and velocity respectively.
}
\description{
These functions are passed to the simulation and define how the position and
velocity of the particles are initiated. The default is to lay out the nodes
in a phyllotactic arrangement (think sunflower seeds) and with no velocity,
which is also the default in d3-force.
}
\section{Functions}{
\itemize{
\item \code{phyllotactic_genesis}: Initiates particles in a phyllotactic arrangement with zero velocity
\item \code{predefined_genesis}: Uses information from the node data to set position and velocity.
\item \code{bigbang_genesis}: Initiates particles at center position and a random velocity
\item \code{aquarium_genesis}: Places particles randomly in a rectangle and gives them a random velocity
\item \code{petridish_genesis}: Places particles randomly on a disc and gives them a random velocity
}}
\examples{
# A contrieved example
graph <- tidygraph::create_notable('bull')
genesis <- phyllotactic_genesis()
genesis(graph)
# Usually used as an argument to simulate
graph \%>\%
simulate(setup = phyllotactic_genesis())
}
| /particles/man/genesis.Rd | permissive | akhikolla/InformationHouse | R | false | true | 2,385 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genesis.R
\name{genesis}
\alias{genesis}
\alias{phyllotactic_genesis}
\alias{predefined_genesis}
\alias{bigbang_genesis}
\alias{aquarium_genesis}
\alias{petridish_genesis}
\title{Particle initialisation}
\usage{
phyllotactic_genesis(radius = 10, angle = pi * (3 - sqrt(5)))
predefined_genesis(x, y, x_vel = 0, y_vel = 0)
bigbang_genesis(vel_min = 0, vel_max = 1)
aquarium_genesis(width = 10, height = 10, vel_min = 0, vel_max = 1)
petridish_genesis(max_radius = 10, vel_min = 0, vel_max = 1)
}
\arguments{
\item{radius}{The radius modifier (will be multiplied by the square root of the index of the particle)}
\item{angle}{The angular difference between two adjacent particles}
\item{x, y}{The columns holding (or value of) the position coordinates}
\item{x_vel, y_vel}{The columns holding (or value of) the velocity verlets}
\item{vel_min, vel_max}{The bounds of the uniformly distributed velocities}
\item{width, height}{The size of the rectangle holding the particles}
\item{max_radius}{The size of the disc.}
}
\value{
A function that takes the particle graph and returns a list with a
position and velocity element, each holding a matrix with two columns and a
row for each particle giving the x and y position and velocity respectively.
}
\description{
These functions are passed to the simulation and define how the position and
velocity of the particles are initiated. The default is to lay out the nodes
in a phyllotactic arrangement (think sunflower seeds) and with no velocity,
which is also the default in d3-force.
}
\section{Functions}{
\itemize{
\item \code{phyllotactic_genesis}: Initiates particles in a phyllotactic arrangement with zero velocity
\item \code{predefined_genesis}: Uses information from the node data to set position and velocity.
\item \code{bigbang_genesis}: Initiates particles at center position and a random velocity
\item \code{aquarium_genesis}: Places particles randomly in a rectangle and gives them a random velocity
\item \code{petridish_genesis}: Places particles randomly on a disc and gives them a random velocity
}}
\examples{
# A contrieved example
graph <- tidygraph::create_notable('bull')
genesis <- phyllotactic_genesis()
genesis(graph)
# Usually used as an argument to simulate
graph \%>\%
simulate(setup = phyllotactic_genesis())
}
|
# Author: Christina Bergmann (chbergma 'at' gmail)
# Last modified: 13.06.2016
# Description: This script prepares item files based on posteriors from forced alignments
library(dplyr)
# NOTE(review): "alignement.csv" (sic) is presumably the actual on-disk file
# name -- confirm before renaming.
alignments = read.csv("Supplementary/alignement.csv", header = FALSE)
segments_data = read.csv("Supplementary/segments.csv", header = FALSE)
names(alignments) <- c("segment", "phone_onset", "phone_offset", "posterior", "phone", "word")
names(segments_data) <- c("segment", "file", "segmentonset", "segmentoffset")
# First two characters of the segment label encode the talker.
alignments$talker = substring(alignments$segment, 1, 2)
alignments$phone = as.character(alignments$phone)
# Join segment offsets onto the alignments, convert phone times to absolute
# file times, and look up each phone's neighbours (lead/lag within file).
data = left_join(alignments, segments_data, by = "segment") %>%
mutate(onset = phone_onset+segmentonset) %>%
mutate(offset = phone_offset+segmentonset) %>%
#Many thanks to Page Piccinini (page.piccinini 'at' gmail) for this code snippet which greatly improved efficiency
# To make sure it's all within file
group_by(file) %>%
# To make sure they are in the right order
arrange(onset) %>%
mutate(next_sound = lead(phone)) %>%
mutate(previous_sound = lag(phone)) %>%
ungroup()
#the pedestrian way, which takes forever
# for(line in 1:nrow(alignments)){
# if(line > 1){
# if(alignments$file[line]==alignments$file[line-1]){
# alignments$previous_sound[line] = as.character(alignments$phone[line-1])
# }
# else{alignments$previous_sound[line] = "_"}
# }
# else{alignments$previous_sound[line] = "_"}
# if(line < length(alignments$file) & alignments$file[line]==alignments$file[line+1]){
# alignments$next_sound[line] = as.character(alignments$phone[line+1])
# }
# else{alignments$next_sound[line] = "_"}
# }
alignments = as.data.frame(data)
# lead()/lag() leave NA at file boundaries; replace with the "_" placeholder
# used by the item-file format.
alignments$previous_sound=ifelse(is.na(alignments$previous_sound), "_", alignments$previous_sound)
alignments$next_sound=ifelse(is.na(alignments$next_sound), "_", alignments$next_sound)
# Context label, e.g. "b-t" for a phone preceded by b and followed by t.
alignments$context = paste(alignments$previous_sound, alignments$next_sound, sep="-")
#Now we set different thresholds and create item files accordingly
# Keep only phones whose forced-alignment posterior meets the threshold.
threshold = 1
thresholded = subset(alignments, posterior>=threshold)
#for simplicity split into vowels and consonant files
vowels = c("aa", "ae", "ah", "ao", "aw", "ax", "ay", "eh", "er", "ey", "ih", "iy", "ow", "oy", "uh", "uw")
brent.vowels = droplevels(subset(thresholded, phone %in% vowels))
brent.cons = droplevels(subset(thresholded, !(phone %in% vowels)))
#Make the data structure fit the file format for easier write out later.
brent.vowels = brent.vowels[c("file", "onset", "offset", "phone", "context", "talker")]
brent.cons = brent.cons[c("file", "onset", "offset", "phone", "context", "talker")]
vowfile = paste("Brent_vowels_", as.character(threshold),".item", sep="")
consfile = paste("Brent_cons_", as.character(threshold),".item", sep="")
file.create(vowfile)
file.create(consfile)
write("#file onset offset #phone context talker", file = vowfile)
write("#file onset offset #phone context talker", file = consfile)
#Next we want to remove those elements where a context does not appear in all talkers per phone
talkers = levels(as.factor(thresholded$talker))
contexts = levels(as.factor(thresholded$context))
phones = levels(as.factor(brent.cons$phone))
#This is a very roundabout way and I am sure in dplyr there are better solutions.
# Consonants: silence ("SIL") is excluded; a (phone, context) cell is written
# only if every talker contributes at least one token.
# NOTE(review): the loop variables `phone`/`context` and the local `subset`
# shadow the data-frame columns and base::subset respectively; the base
# subsetting below still resolves them correctly.
for(phone in phones[phones != "SIL"]){
for(context in contexts){
subset = brent.cons[brent.cons$phone==phone & brent.cons$context==context, ]
if(length(unique(subset$talker))==length(talkers)){
write.table(subset, append = TRUE, sep = " ", col.names= FALSE, row.names = FALSE, quote = FALSE, file = consfile)
}
}
}
# Same filtering for vowels (no SIL exclusion needed).
phones = levels(as.factor(brent.vowels$phone))
for(phone in phones){
for(context in contexts){
subset = brent.vowels[brent.vowels$phone==phone & brent.vowels$context==context, ]
if(length(unique(subset$talker))==length(talkers)){
write.table(subset, append = TRUE, sep = " ", col.names= FALSE, row.names = FALSE, quote = FALSE, file = vowfile)
}
}
}
| /Supplementary/CreateItemFiles.R | no_license | christinabergmann/ABX-Brent | R | false | false | 4,032 | r | # Author: Christina Bergmann (chbergma 'at' gmail)
# Last modified: 13.06.2016
# Description: This script prepares item files based on posteriors from forced alignments
library(dplyr)
# NOTE(review): "alignement.csv" (sic) is presumably the actual on-disk file name.
alignments = read.csv("Supplementary/alignement.csv", header = FALSE)
segments_data = read.csv("Supplementary/segments.csv", header = FALSE)
names(alignments) <- c("segment", "phone_onset", "phone_offset", "posterior", "phone", "word")
names(segments_data) <- c("segment", "file", "segmentonset", "segmentoffset")
# First two characters of the segment label encode the talker.
alignments$talker = substring(alignments$segment, 1, 2)
alignments$phone = as.character(alignments$phone)
# Join segment offsets, convert phone times to absolute file times, and look
# up each phone's neighbours (lead/lag within file, ordered by onset).
data = left_join(alignments, segments_data, by = "segment") %>%
mutate(onset = phone_onset+segmentonset) %>%
mutate(offset = phone_offset+segmentonset) %>%
#Many thanks to Page Piccinini (page.piccinini 'at' gmail) for this code snippet which greatly improved efficiency
# To make sure it's all within file
group_by(file) %>%
# To make sure they are in the right order
arrange(onset) %>%
mutate(next_sound = lead(phone)) %>%
mutate(previous_sound = lag(phone)) %>%
ungroup()
#the pedestrian way, which takes forever
# for(line in 1:nrow(alignments)){
# if(line > 1){
# if(alignments$file[line]==alignments$file[line-1]){
# alignments$previous_sound[line] = as.character(alignments$phone[line-1])
# }
# else{alignments$previous_sound[line] = "_"}
# }
# else{alignments$previous_sound[line] = "_"}
# if(line < length(alignments$file) & alignments$file[line]==alignments$file[line+1]){
# alignments$next_sound[line] = as.character(alignments$phone[line+1])
# }
# else{alignments$next_sound[line] = "_"}
# }
alignments = as.data.frame(data)
# Replace boundary NAs from lead()/lag() with the "_" placeholder.
alignments$previous_sound=ifelse(is.na(alignments$previous_sound), "_", alignments$previous_sound)
alignments$next_sound=ifelse(is.na(alignments$next_sound), "_", alignments$next_sound)
alignments$context = paste(alignments$previous_sound, alignments$next_sound, sep="-")
#Now we set different thresholds and create item files accordingly
threshold = 1
thresholded = subset(alignments, posterior>=threshold)
#for simplicity split into vowels and consonant files
vowels = c("aa", "ae", "ah", "ao", "aw", "ax", "ay", "eh", "er", "ey", "ih", "iy", "ow", "oy", "uh", "uw")
brent.vowels = droplevels(subset(thresholded, phone %in% vowels))
brent.cons = droplevels(subset(thresholded, !(phone %in% vowels)))
#Make the data structure fit the file format for easier write out later.
brent.vowels = brent.vowels[c("file", "onset", "offset", "phone", "context", "talker")]
brent.cons = brent.cons[c("file", "onset", "offset", "phone", "context", "talker")]
vowfile = paste("Brent_vowels_", as.character(threshold),".item", sep="")
consfile = paste("Brent_cons_", as.character(threshold),".item", sep="")
file.create(vowfile)
file.create(consfile)
write("#file onset offset #phone context talker", file = vowfile)
write("#file onset offset #phone context talker", file = consfile)
#Next we want to remove those elements where a context does not appear in all talkers per phone
talkers = levels(as.factor(thresholded$talker))
contexts = levels(as.factor(thresholded$context))
phones = levels(as.factor(brent.cons$phone))
#This is a very roundabout way and I am sure in dplyr there are better solutions.
# A (phone, context) cell is written only when every talker contributes at
# least one token; silence ("SIL") is excluded for consonants.
# NOTE(review): `phone`/`context`/`subset` shadow column names / base::subset.
for(phone in phones[phones != "SIL"]){
for(context in contexts){
subset = brent.cons[brent.cons$phone==phone & brent.cons$context==context, ]
if(length(unique(subset$talker))==length(talkers)){
write.table(subset, append = TRUE, sep = " ", col.names= FALSE, row.names = FALSE, quote = FALSE, file = consfile)
}
}
}
# Same filtering for vowels.
phones = levels(as.factor(brent.vowels$phone))
for(phone in phones){
for(context in contexts){
subset = brent.vowels[brent.vowels$phone==phone & brent.vowels$context==context, ]
if(length(unique(subset$talker))==length(talkers)){
write.table(subset, append = TRUE, sep = " ", col.names= FALSE, row.names = FALSE, quote = FALSE, file = vowfile)
}
}
}
|
# Look at 1990 land area relative to water area for census block groups.
bg00 <- st_read(here("data/geopackage/nhgis_block_groups.gpkg"), layer = "US_block_groups_2000")
# Share of each block group's total footprint that is covered by water.
blockGroupArea <- bg00$ALAND00 + bg00$AWATER00
bg00$pctWat <- bg00$AWATER00 / blockGroupArea
# Quick base-graphics sanity check of land area against water area.
plot(bg00$ALAND00~bg00$AWATER00)
# Histogram of water coverage across all block groups.
ggplot(bg00, aes(x = pctWat)) +
  geom_histogram(bins = 40) +
  scale_x_continuous(labels = scales::percent) +
  scale_y_continuous(labels = function(x) format(x, big.mark = ",", scientific = FALSE)) +
  labs(title = "Percent Area of Block Groups Covered in Water",
       x = "Percent Water",
       y = "Number of Block Groups")
| /scripts/misc/percent_water_1990.R | no_license | ARMurray/DomesticWells | R | false | false | 551 | r | # look at 1990 land area to water area
bg00 <- st_read(here("data/geopackage/nhgis_block_groups.gpkg"), layer = "US_block_groups_2000")
bg00$pctWat <- bg00$AWATER00/(bg00$ALAND00+bg00$AWATER00)
plot(bg00$ALAND00~bg00$AWATER00)
ggplot(bg00)+
geom_histogram(aes(pctWat),bins=40)+
scale_x_continuous(labels = scales::percent)+
scale_y_continuous(labels=function(x) format(x, big.mark = ",", scientific = FALSE))+
labs(title = "Percent Area of Block Groups Covered in Water",
x = "Percent Water",
y = "Number of Block Groups")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bindArr.r
\name{bindArr}
\alias{bindArr}
\title{concatenate multiple arrays/matrices}
\usage{
bindArr(..., along = 1)
}
\arguments{
\item{\dots}{matrices and/or arrays with appropriate dimensionality to
combine into one array, or a single list containing suitable matrices and/or arrays.}
\item{along}{dimension along which to concatenate.}
}
\value{
returns array of combined matrices/arrays
}
\description{
concatenate multiple 3-dimensional arrays and/or 2-dimensional matrices to
one big array
}
\details{
dimnames, if present and if differing between entries, will be concatenated, separated by a "_".
}
\examples{
A <- matrix(rnorm(18),6,3)
B <- matrix(rnorm(18),6,3)
C <- matrix(rnorm(18),6,3)
#combine to 3D-array
newArr <- bindArr(A,B,C,along=3)
#combine along first dimension
newArr2 <- bindArr(newArr,newArr,along=1)
}
\seealso{
\code{\link{cbind}}, \code{\link{rbind}}, \code{\link{array}}
}
| /fuzzedpackages/Morpho/man/bindArr.Rd | no_license | akhikolla/testpackages | R | false | true | 984 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bindArr.r
\name{bindArr}
\alias{bindArr}
\title{concatenate multiple arrays/matrices}
\usage{
bindArr(..., along = 1)
}
\arguments{
\item{\dots}{matrices and/or arrays with appropriate dimensionality to
combine to one array, or a single list containing suitable matrices, or arrays).}
\item{along}{dimension along which to concatenate.}
}
\value{
returns array of combined matrices/arrays
}
\description{
concatenate multiple 3-dimensional arrays and/or 2-dimensional matrices to
one big array
}
\details{
dimnames, if present and if differing between entries, will be concatenated, separated by a "_".
}
\examples{
A <- matrix(rnorm(18),6,3)
B <- matrix(rnorm(18),6,3)
C <- matrix(rnorm(18),6,3)
#combine to 3D-array
newArr <- bindArr(A,B,C,along=3)
#combine along first dimension
newArr2 <- bindArr(newArr,newArr,along=1)
}
\seealso{
\code{\link{cbind}}, \code{\link{rbind}}, \code{\link{array}}
}
|
#2018/09/17 CP
#Compare the strength of interactions with the effect of covariates
# Clear the workspace and close any open graphics devices before running.
rm(list=ls())
graphics.off()
library(MARSS)
# Provides clean_matrix(), used below to extract the interaction matrix B.
source("script/matrix_MAR_clean.r")
# MAR model variant whose fitted objects are loaded below (part of the file names).
option_model="pencen"
option_NEI="null" #We don't take the "Not Elsewhere Identified" species into account
# Regions sampled: each contributes 2-3 monitoring sites (10 sites in total).
groupe=c("BZ","MO","AR","SU")
option_sp="common" #Species are the same
# If TRUE, zero interactions are kept when averaging; if FALSE they are treated
# as missing.  Also selects the suffix of the output PDF below.
take0=TRUE
# Per-site summary statistics: mean/variance of intraspecific terms (diagonal
# of B), of interspecific terms (off-diagonal, raw and absolute), and of the
# covariate effects U (raw and absolute).
methods=c("intra_mean","intra_var","inter_mean_abs","inter_mean_raw","inter_var_raw","inter_var_abs","evt_abs","evt_raw")
id_lieu=0
if(take0){
extant="with0"
}else{
extant="no0"
}
# One row per site (10 sites), one column per summary statistic.
tab_value=matrix(NA,nrow=10,ncol=length(methods))
colnames(tab_value)=methods
rownames(tab_value)=rep("",10)
# Plotting colour for each site; one colour per region.
colo=c()
for (g in groupe){
# Sites belonging to the current region, plus that region's colour.
if(g=="BZ"){
option_lieu=c("Men er Roue","Loscolo","Croisic")
colo=c(colo,rep("green",length(option_lieu)))
}else if(g=="MO"){
option_lieu=c("LEperon","Cornard","Auger")
colo=c(colo,rep("darkblue",length(option_lieu)))
}else if(g=="SU"){
option_lieu=c("Antoine","Lazaret")
colo=c(colo,rep("darkred",length(option_lieu)))
}else if(g=="AR"){
option_lieu=c("Teychan","B7")
colo=c(colo,rep("cyan",length(option_lieu)))
}
for (ll in 1:length(option_lieu)){
id_lieu=id_lieu+1
rownames(tab_value)[id_lieu]=option_lieu[ll]
# Load the fitted model object `cis` (saved by an earlier script) for this
# site/model combination.
f1=paste("data/analyse_MAR/",g,"/site_specific/",option_lieu[ll],"_",option_model,"_",option_NEI,"_regular_common_",g,".RData",sep="")
load(f1)
# B: interaction matrix; diagonal = intraspecific, off-diagonal = interspecific.
B=clean_matrix(cis)
B_nodiag=B
diag(B_nodiag)=NA
tab_value[id_lieu,"intra_mean"]=mean(diag(B))
tab_value[id_lieu,"intra_var"]=var(diag(B))
# Optionally drop the exact-zero interactions before averaging.
if(!take0){
B_nodiag[B_nodiag==0]=NA
}
tab_value[id_lieu,"inter_mean_abs"]=mean(c(abs(B_nodiag)),na.rm=T)
tab_value[id_lieu,"inter_mean_raw"]=mean(c(B_nodiag),na.rm=T)
tab_value[id_lieu,"inter_var_raw"]=var(c(B_nodiag),na.rm=T)
tab_value[id_lieu,"inter_var_abs"]=var(c(abs(B_nodiag)),na.rm=T)
# U: covariate (environmental) effects from the fitted model.
tab_value[id_lieu,"evt_abs"]=mean(abs(cis$par$U))
tab_value[id_lieu,"evt_raw"]=mean(cis$par$U)
}
}
# Two side-by-side scatter plots: covariate effects against intraspecific and
# interspecific interaction strengths, coloured by region.
pdf(paste("Rapport/graphe/MAR_estimates/inter_vs_evt_",option_model,"_",extant,".pdf",sep=""),width=11)
par(mfrow=c(1,2),mar=c(4,4,1,1))
plot(tab_value[,"intra_mean"],tab_value[,"evt_abs"],xlab="Intra",ylab="Evt",pch=16,col=colo,cex=2)
plot(tab_value[,"inter_mean_abs"],tab_value[,"evt_abs"],xlab="Inter",ylab="",pch=16,col=colo,cex=2)
dev.off()
| /script/exploratory/compare_evt_interaction.r | no_license | CoraliePicoche/REPHY-littoral | R | false | false | 2,792 | r | #2018/09/17 CP
#Compare the strength of interactions with the effect of covariates
rm(list=ls())
graphics.off()
library(MARSS)
source("script/matrix_MAR_clean.r")
option_model="pencen"
option_NEI="null" #We don't take the "Not Elsewhere Identified" species into account
groupe=c("BZ","MO","AR","SU")
option_sp="common" #Species are the same
take0=TRUE
methods=c("intra_mean","intra_var","inter_mean_abs","inter_mean_raw","inter_var_raw","inter_var_abs","evt_abs","evt_raw")
id_lieu=0
if(take0){
extant="with0"
}else{
extant="no0"
}
tab_value=matrix(NA,nrow=10,ncol=length(methods))
colnames(tab_value)=methods
rownames(tab_value)=rep("",10)
colo=c()
for (g in groupe){
if(g=="BZ"){
option_lieu=c("Men er Roue","Loscolo","Croisic")
colo=c(colo,rep("green",length(option_lieu)))
}else if(g=="MO"){
option_lieu=c("LEperon","Cornard","Auger")
colo=c(colo,rep("darkblue",length(option_lieu)))
}else if(g=="SU"){
option_lieu=c("Antoine","Lazaret")
colo=c(colo,rep("darkred",length(option_lieu)))
}else if(g=="AR"){
option_lieu=c("Teychan","B7")
colo=c(colo,rep("cyan",length(option_lieu)))
}
for (ll in 1:length(option_lieu)){
id_lieu=id_lieu+1
rownames(tab_value)[id_lieu]=option_lieu[ll]
f1=paste("data/analyse_MAR/",g,"/site_specific/",option_lieu[ll],"_",option_model,"_",option_NEI,"_regular_common_",g,".RData",sep="")
load(f1)
B=clean_matrix(cis)
B_nodiag=B
diag(B_nodiag)=NA
tab_value[id_lieu,"intra_mean"]=mean(diag(B))
tab_value[id_lieu,"intra_var"]=var(diag(B))
if(!take0){
B_nodiag[B_nodiag==0]=NA
}
tab_value[id_lieu,"inter_mean_abs"]=mean(c(abs(B_nodiag)),na.rm=T)
tab_value[id_lieu,"inter_mean_raw"]=mean(c(B_nodiag),na.rm=T)
tab_value[id_lieu,"inter_var_raw"]=var(c(B_nodiag),na.rm=T)
tab_value[id_lieu,"inter_var_abs"]=var(c(abs(B_nodiag)),na.rm=T)
tab_value[id_lieu,"evt_abs"]=mean(abs(cis$par$U))
tab_value[id_lieu,"evt_raw"]=mean(cis$par$U)
}
}
pdf(paste("Rapport/graphe/MAR_estimates/inter_vs_evt_",option_model,"_",extant,".pdf",sep=""),width=11)
par(mfrow=c(1,2),mar=c(4,4,1,1))
plot(tab_value[,"intra_mean"],tab_value[,"evt_abs"],xlab="Intra",ylab="Evt",pch=16,col=colo,cex=2)
plot(tab_value[,"inter_mean_abs"],tab_value[,"evt_abs"],xlab="Inter",ylab="",pch=16,col=colo,cex=2)
dev.off()
|
\encoding{UTF-8}
\name{relate.levels}
\alias{relate.levels}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Relates two clustering level results.
}
\description{
Analyzes how lower level clusters are assigned into upper level ones. The analysis is made for several number of clusters.
}
\usage{
relate.levels(lower, upper, defuzzify = FALSE, excludeFixed = FALSE, verbose=FALSE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{lower}{ A list of objects of type \code{\link{vegclust}} or \code{\link{vegclass}} that represent classifications at a finer level of resolution.}
  \item{upper}{ A list of objects of type \code{\link{vegclust}} or \code{\link{vegclass}} that represent classifications at a broader level of resolution.}
  \item{defuzzify}{ A logical flag used to indicate whether the result of calling \code{\link{crossmemb}} should be defuzzified.}
\item{excludeFixed}{A logical used to indicate whether fixed clusters should be excluded from the comparison of levels.}
\item{verbose}{ A flag used to ask for extra screen output.}
\item{...}{
Additional parameters for function \code{\link{defuzzify}}.
}
}
\details{
For each pair of \code{vegclust} (or \code{vegclass}) objects in \code{upper} and \code{lower}, the function calls function \code{\link{crossmemb}} and then, if asked, defuzzifies the resulting memberships (by calling function \code{\link{defuzzify}}) and several quantities are calculated (see 'value' section).
}
\value{
A list with several data frames (see below). In each of them, the rows are items of \code{upper} and columns are items of \code{lower}. The names of rows and columns are the number of clusters of each \code{\link{vegclust}} (or \code{vegclass}) object.
\item{nnoise}{The number of low level clusters that are assigned to the Noise class (for \code{upper} objects using Noise clustering). }
\item{maxnoise}{The maximum membership value of low level clusters to the Noise class (for \code{upper} objects using Noise clustering). }
\item{minmaxall}{The minimum value (across upper level clusters) of the maximum membership value observed among the lower level clusters. }
\item{minallsize}{The minimum value (across upper level clusters) of the sum of membership values across lower level clusters. }
\item{empty}{The number of upper level clusters (mobile or fixed) that do not have any member among the lower level clusters. }
}
\author{
Miquel De \enc{Cáceres}{Caceres}, CREAF
}
\seealso{
\code{\link{vegclust}}, \code{\link{vegclass}}, \code{\link{defuzzify}}
}
\examples{
## Loads data
data(wetland)
## This equals the chord transformation
## (see also \code{\link{decostand}} in package vegan)
wetland.chord = as.data.frame(sweep(as.matrix(wetland), 1,
sqrt(rowSums(as.matrix(wetland)^2)), "/"))
## Create noise clustering from hierarchical clustering at different number of cluster
wetland.hc = hclust(dist(wetland.chord),method="ward")
wetland.nc1 = hier.vegclust(wetland.chord, wetland.hc, cmin=2, cmax=6, m = 1.2,
dnoise=0.75, method="NC")
wetland.nc2 = hier.vegclust(wetland.chord, wetland.hc, cmin=2, cmax=4, m = 1.2,
dnoise=0.85, method="NC")
## Studies the assignment of levels
relate.levels(wetland.nc1, wetland.nc2, method="cut")
}
| /man/relate.levels.Rd | no_license | cran/vegclust | R | false | false | 3,375 | rd | \encoding{UTF-8}
\name{relate.levels}
\alias{relate.levels}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Relates two clustering level results.
}
\description{
Analyzes how lower level clusters are assigned into upper level ones. The analysis is made for several number of clusters.
}
\usage{
relate.levels(lower, upper, defuzzify = FALSE, excludeFixed = FALSE, verbose=FALSE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{lower}{ A list of objects of type \code{\link{vegclust}} or \code{\link{vegclass}} that represent classifications at a finer level of resolution.}
\item{upper}{ A list of objects of type \code{\link{vegclust}} or \code{\link{vegclass}} that represent classifications at an broader level of resolution.}
\item{defuzzify}{ A logical flag used to indicate whether the result of calling \code{\link{crossmemb}} should be deffuzified.}
\item{excludeFixed}{A logical used to indicate whether fixed clusters should be excluded from the comparison of levels.}
\item{verbose}{ A flag used to ask for extra screen output.}
\item{...}{
Additional parameters for function \code{\link{defuzzify}}.
}
}
\details{
For each pair of \code{vegclust} (or \code{vegclass}) objects in \code{upper} and \code{lower}, the function calls function \code{\link{crossmemb}} and then, if asked, deffuzifies the resulting memberships (by calling function \code{\link{defuzzify}}) and several quantities are calculated (see 'value' section).
}
\value{
A list with several data frames (see below). In each of them, the rows are items of \code{upper} and columns are items of \code{lower}. The names of rows and columns are the number of clusters of each \code{\link{vegclust}} (or \code{vegclass}) object.
\item{nnoise}{The number of low level clusters that are assigned to the Noise class (for \code{upper} objects using Noise clustering). }
\item{maxnoise}{The maximum membership value of low level clusters to the Noise class (for \code{upper} objects using Noise clustering). }
\item{minmaxall}{The minimum value (across upper level clusters) of the maximum membership value observed among the lower level clusters. }
\item{minallsize}{The minimum value (across upper level clusters) of the sum of membership values across lower level clusters. }
\item{empty}{The number of upper level clusters (mobile or fixed) that do not have any member among the lower level clusters. }
}
\author{
Miquel De \enc{Cáceres}{Caceres}, CREAF
}
\seealso{
\code{\link{vegclust}}, \code{\link{vegclass}}, \code{\link{defuzzify}}
}
\examples{
## Loads data
data(wetland)
## This equals the chord transformation
## (see also \code{\link{decostand}} in package vegan)
wetland.chord = as.data.frame(sweep(as.matrix(wetland), 1,
sqrt(rowSums(as.matrix(wetland)^2)), "/"))
## Create noise clustering from hierarchical clustering at different number of cluster
wetland.hc = hclust(dist(wetland.chord),method="ward")
wetland.nc1 = hier.vegclust(wetland.chord, wetland.hc, cmin=2, cmax=6, m = 1.2,
dnoise=0.75, method="NC")
wetland.nc2 = hier.vegclust(wetland.chord, wetland.hc, cmin=2, cmax=4, m = 1.2,
dnoise=0.85, method="NC")
## Studies the assignment of levels
relate.levels(wetland.nc1, wetland.nc2, method="cut")
}
|
#' Weather data from Macleish Field Stations
#'
#' @description Weather data collected at the Macleish Field Station in Whately,
#' MA during 2015.
#'
#' @details The Macleish Field Station is a remote outpost owned by Smith
#' College and used for field research. There are two weather stations on the
#' premises. One is called \code{WhatelyMet} and the other is \code{OrchardMet}.
#'
#' The \code{WhatelyMet} station is located at (42.448470, -72.680553) and
#' the \code{OrchardMet} station is at (42.449653, -72.680315).
#'
#' \code{WhatelyMet} is located at the end of Poplar Hill Road in Whately,
#' Massachusetts, USA. The meteorological instruments of \code{WhatelyMet} (except the
#' rain gauge) are mounted at the top of a tower 25.3 m tall, well above the
#' surrounding forest canopy. The tower is located on a local ridge at an
#' elevation 250.75m above sea level.
#'
#' \code{OrchardMet} is located about 250 m north of the first tower in an open
#' field next to an apple orchard. Full canopy trees (~20 m tall) are within
#' 30 m of this station. This station has a standard instrument configuration
#' with temperature, relative humidity, solar radiation, and barometric
#' pressure measured between 1.5 and 2.0 m above the ground. Wind speed and
#' direction are measured on a 10 m tall tower and precipitation is measured
#' on the ground. Ground temperature is measured at 15 and 30 cm below the
#' ground surface 2 m south of the tower. The tower is located 258.1 m above
#' sea level. Data collection at OrchardMet began on June 27th, 2014.
#'
#' The variables shown above are weather data collected at \code{WhatelyMet} and
#' \code{OrchardMet} during 2015. Solar radiation is measured in two different ways:
#' see \code{SlrW_Avg}or the \code{PAR} variables for Photosynthetic Active Radiation.
#'
#' Note that a loose wire resulted in erroneous temperature reading at OrchardMet
#' in late November, 2015.
#'
#' @docType data
#' @format For both, a data frame (\code{\link[dplyr]{tbl_df}}) with roughly 52,560 rows and 8 or 9 variables.
#'
#' The following variables are values that are found in either the \code{whately_2015}
#' or \code{orchard_2015} data tables.
#'
#' All variables are averaged over the 10 minute interval unless otherwise noted.
#' \describe{
#'   \item{when}{Timestamp for each measurement set in Eastern Standard Time.}
#' \item{temperature}{average temperature, in Celsius}
#' \item{wind_speed}{Wind speed, in meters per second}
#' \item{wind_dir}{Wind direction, in degrees}
#' \item{rel_humidity}{How much water there is in the air, in millimeters}
#' \item{pressure}{Atmospheric pressure, in millibars}
#' \item{rainfall}{Total rainfall, in millimeters}
#' \item{solar_radiation}{Amount of radiation coming from the sun, in Watts/meters^2. Solar measurement for Whately}
#' \item{par_density}{Photosynthetically Active Radiation (sunlight between 400 and 700 nm), in average density of Watts/meters^2. One of two solar measurements for Orchard}
#' \item{par_total}{Photosynthetically Active Radiation (sunlight between 400 and 700 nm), in average total over measurement period of Watts/meters^2. One of two solar measurements for Orchard}
#' }
#' @source These data are recorded at \url{http://www.smith.edu/ceeds/macleish_monitoring.php}
"whately_2015"
#' @rdname whately_2015
#' @examples
#'
#' \dontrun{
#' #' # loose wire anomalies
#' if (require(dplyr) & require(ggplot2) & require(lubridate)) {
#' orchard_2015 %>%
#' filter(month(when) == 11) %>%
#' ggplot(aes(x = when, y = temperature)) +
#' geom_line() + geom_smooth()
#' }
#' }
"orchard_2015"
#' MacLeish spatial data
#'
#' @description Shapefiles from the MacLeish Field Station. The field station
#' itself is located at \code{lat = 42.449167}, \code{lon = -72.679389}. These
#' data contain information about various man-made and natural structures
#' surrounding the field station.
#'
#' @details Each of the \code{\link[sf]{sf}} objects are projected in
#' \code{epsg:4326} for easy integration with \code{\link[ggmap]{ggmap}} or
#' \code{\link[leaflet]{leaflet}} objects.
#'
#' @docType data
#' @format A \code{list} of \code{\link[sf]{sf}} objects, each providing a different layer.
#'
#' \describe{
#' \item{landmarks}{Landmarks}
#' \item{forests}{Type of dominant tree in individual forests, as
#' noted by Jesse Bellemare}
#' \item{streams}{local streams}
#' \item{challenge_courses}{Challenge courses on the property}
#' \item{reservoir}{a reservoir}
#' \item{buildings}{Buildings at MacLeish}
#' \item{wetlands}{Wetland areas}
#' \item{trails}{Hiking trails}
#' \item{schools}{Schools in Massachusetts}
#' \item{boundary}{the property boundary}
#' \item{research}{research plots}
#' \item{soil}{soil deposits used by Amy Rhodes}
#' \item{contours_30ft}{30 foot contours}
#' \item{contours_3m}{3 meter contours}
#' }
#'
#' @examples
#' names(macleish_layers)
#' macleish_layers[["buildings"]]
#'
#' if (require(sf)) {
#' plot(macleish_layers[["buildings"]])
#' }
#'
"macleish_layers"
#' Maple sap collection at MacLeish
#' @docType data
#' @format
#' \describe{
#' \item{when}{the date of collection}
#' \item{sap}{how much sap was collected, in gallons}
#' \item{Comments}{comments}
#' \item{People}{who was there?}
#' }
"maple_sap" | /R/data.R | no_license | dcaravela/macleish | R | false | false | 5,350 | r | #' Weather data from Macleish Field Stations
#'
#' @description Weather data collected at the Macleish Field Station in Whately,
#' MA during 2015.
#'
#' @details The Macleish Field Station is a remote outpost owned by Smith
#' College and used for field research. There are two weather stations on the
#' premises. One is called \code{WhatelyMet} and the other is \code{OrchardMet}.
#'
#' The \code{WhatelyMet} station is located at (42.448470, -72.680553) and
#' the \code{OrchardMet} station is at (42.449653, -72.680315).
#'
#' \code{WhatelyMet} is located at the end of Poplar Hill Road in Whately,
#' Massachusetts, USA. The meteorological instruments of \code{WhatelyMet} (except the
#' rain gauge) are mounted at the top of a tower 25.3 m tall, well above the
#' surrounding forest canopy. The tower is located on a local ridge at an
#' elevation 250.75m above sea level.
#'
#' \code{OrchardMet} is located about 250 m north of the first tower in an open
#' field next to an apple orchard. Full canopy trees (~20 m tall) are within
#' 30 m of this station. This station has a standard instrument configuration
#' with temperature, relative humidity, solar radiation, and barometric
#' pressure measured between 1.5 and 2.0 m above the ground. Wind speed and
#' direction are measured on a 10 m tall tower and precipitation is measured
#' on the ground. Ground temperature is measured at 15 and 30 cm below the
#' ground surface 2 m south of the tower. The tower is located 258.1 m above
#' sea level. Data collection at OrchardMet began on June 27th, 2014.
#'
#' The variables shown above are weather data collected at \code{WhatelyMet} and
#' \code{OrchardMet} during 2015. Solar radiation is measured in two different ways:
#' see \code{SlrW_Avg}or the \code{PAR} variables for Photosynthetic Active Radiation.
#'
#' Note that a loose wire resulted in erroneous temperature reading at OrchardMet
#' in late November, 2015.
#'
#' @docType data
#' @format For both, a data frame (\code{\link[dplyr]{tbl_df}}) with roughly 52,560 rows and 8 or 9 variables.
#'
#' The following variables are values that are found in either the \code{whately_2015}
#' or \code{orchard_2015} data tables.
#'
#' All variables are averaged over the 10 minute interval unless otherwise noted.
#' \describe{
#' \item{when}{Timestamp for each measurment set in Eastern Standard Time.}
#' \item{temperature}{average temperature, in Celsius}
#' \item{wind_speed}{Wind speed, in meters per second}
#' \item{wind_dir}{Wind direction, in degrees}
#' \item{rel_humidity}{How much water there is in the air, in millimeters}
#' \item{pressure}{Atmospheric pressure, in millibars}
#' \item{rainfall}{Total rainfall, in millimeters}
#' \item{solar_radiation}{Amount of radiation coming from the sun, in Watts/meters^2. Solar measurement for Whately}
#' \item{par_density}{Photosynthetically Active Radiation (sunlight between 400 and 700 nm), in average density of Watts/meters^2. One of two solar measurements for Orchard}
#' \item{par_total}{Photosynthetically Active Radiation (sunlight between 400 and 700 nm), in average total over measurement period of Watts/meters^2. One of two solar measurements for Orchard}
#' }
#' @source These data are recorded at \url{http://www.smith.edu/ceeds/macleish_monitoring.php}
"whately_2015"
#' @rdname whately_2015
#' @examples
#'
#' \dontrun{
#' #' # loose wire anomalies
#' if (require(dplyr) & require(ggplot2) & require(lubridate)) {
#' orchard_2015 %>%
#' filter(month(when) == 11) %>%
#' ggplot(aes(x = when, y = temperature)) +
#' geom_line() + geom_smooth()
#' }
#' }
"orchard_2015"
#' MacLeish spatial data
#'
#' @description Shapefiles from the MacLeish Field Station. The field station
#' itself is located at \code{lat = 42.449167}, \code{lon = -72.679389}. These
#' data contain information about various man-made and natural structures
#' surrounding the field station.
#'
#' @details Each of the \code{\link[sf]{sf}} objects are projected in
#' \code{epsg:4326} for easy integration with \code{\link[ggmap]{ggmap}} or
#' \code{\link[leaflet]{leaflet}} objects.
#'
#' @docType data
#' @format A \code{list} of \code{\link[sf]{sf}} objects, each providing a different layer.
#'
#' \describe{
#' \item{landmarks}{Landmarks}
#' \item{forests}{Type of dominant tree in individual forests, as
#' noted by Jesse Bellemare}
#' \item{streams}{local streams}
#' \item{challenge_courses}{Challenge courses on the property}
#' \item{reservoir}{a reservoir}
#' \item{buildings}{Buildings at MacLeish}
#' \item{wetlands}{Wetland areas}
#' \item{trails}{Hiking trails}
#' \item{schools}{Schools in Massachusetts}
#' \item{boundary}{the property boundary}
#' \item{research}{research plots}
#' \item{soil}{soil deposits used by Amy Rhodes}
#' \item{contours_30ft}{30 foot contours}
#' \item{contours_3m}{3 meter contours}
#' }
#'
#' @examples
#' names(macleish_layers)
#' macleish_layers[["buildings"]]
#'
#' if (require(sf)) {
#' plot(macleish_layers[["buildings"]])
#' }
#'
"macleish_layers"
#' Maple sap collection at MacLeish
#' @docType data
#' @format
#' \describe{
#' \item{when}{the date of collection}
#' \item{sap}{how much sap was collected, in gallons}
#' \item{Comments}{comments}
#' \item{People}{who was there?}
#' }
"maple_sap" |
simSyn <-
function(sims, weight) {
  # Synthesize a single similarity object as the weighted sum of several.
  #
  # Args:
  #   sims:   a list of similarity objects (all the same shape, supporting
  #           `*` and `+`, e.g. numeric matrices or vectors).
  #   weight: a numeric vector of weights, one per element of sims.
  #
  # Returns:
  #   The element-wise weighted sum: sum_i weight[i] * sims[[i]].
  # Check that the input lengths agree.
  len <- length(sims)
  if (len != length(weight)) {
    stop("different lengths between sims and weight")
  }
  # Combine the similarities as a weighted sum.  seq_len(len)[-1] is empty
  # when len == 1, so a single-element list is handled correctly (the former
  # `2:len` loop would have indexed out of bounds in that case).
  sim <- weight[1] * sims[[1]]
  for (i in seq_len(len)[-1]) {
    sim <- sim + weight[i] * sims[[i]]
  }
  return(sim)
}
| /smdc/R/simSyn.R | no_license | ingted/R-Examples | R | false | false | 361 | r | simSyn <-
function(sims,weight) {
# 入力ベクトルの長さをチェック
len = length(sims)
if (len!=length(weight)) {
stop(message="different lengths between sims and weight")
}
# 類似度を合成する
sim <- weight[1] * sims[[1]]
for (i in 2:len) {
sim <- sim + weight[i] * sims[[i]]
}
return(sim)
}
|
model_cropheatflux <- function (netRadiationEquivalentEvaporation = 638.142,
                              soilHeatFlux = 188.817,
                              potentialTranspiration = 1.413){
    #'- Name: CropHeatFlux -Version: 1.0, -Time step: 1
    #'- Description:
    #'            * Title: CropHeatFlux Model
    #'            * Author: Pierre Martre
    #'            * Reference: abModelling energy balance in the wheat crop model SiriusQuality2:
    #'            Evapotranspiration and canopy and soil temperature calculations
    #'            * Institution: INRA/LEPSE Montpellier
    #'            * ExtendedDescription: It is calculated from net Radiation, soil heat flux and potential transpiration
    #'            * ShortDescription: It calculates the crop heat flux
    #'
    #'- inputs:
    #'            * name: netRadiationEquivalentEvaporation
    #'                          ** variablecategory : auxiliary
    #'                          ** description : net Radiation Equivalent Evaporation
    #'                          ** datatype : DOUBLE
    #'                          ** default : 638.142
    #'                          ** min : 0
    #'                          ** max : 10000
    #'                          ** unit : g m-2 d-1
    #'                          ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
    #'                          ** inputtype : variable
    #'            * name: soilHeatFlux
    #'                          ** description : soil Heat Flux
    #'                          ** variablecategory : rate
    #'                          ** datatype : DOUBLE
    #'                          ** default : 188.817
    #'                          ** min : 0
    #'                          ** max : 1000
    #'                          ** unit : g m-2 d-1
    #'                          ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
    #'                          ** inputtype : variable
    #'            * name: potentialTranspiration
    #'                          ** description : potential Transpiration
    #'                          ** variablecategory : rate
    #'                          ** datatype : DOUBLE
    #'                          ** default : 1.413
    #'                          ** min : 0
    #'                          ** max : 1000
    #'                          ** unit : g m-2 d-1
    #'                          ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
    #'                          ** inputtype : variable
    #'- outputs:
    #'            * name: cropHeatFlux
    #'                          ** description : crop Heat Flux
    #'                          ** variablecategory : rate
    #'                          ** datatype : DOUBLE
    #'                          ** min : 0
    #'                          ** max : 10000
    #'                          ** unit : g m-2 d-1
    #'                          ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
    # Crop heat flux is the residual energy: net radiation (expressed as its
    # evaporation equivalent) minus soil heat flux and potential transpiration.
    # Per the metadata above, all three terms share the unit g m-2 d-1.
    cropHeatFlux <- netRadiationEquivalentEvaporation - soilHeatFlux - potentialTranspiration
    return (list('cropHeatFlux' = cropHeatFlux))
} | /energybalance_pkg/src/r/SQ_Energy_Balance/Cropheatflux.r | permissive | AgriculturalModelExchangeInitiative/Tutorial | R | false | false | 3,146 | r | model_cropheatflux <- function (netRadiationEquivalentEvaporation = 638.142,
soilHeatFlux = 188.817,
potentialTranspiration = 1.413){
#'- Name: CropHeatFlux -Version: 1.0, -Time step: 1
#'- Description:
#' * Title: CropHeatFlux Model
#' * Author: Pierre Martre
#' * Reference: abModelling energy balance in the wheat crop model SiriusQuality2:
#' Evapotranspiration and canopy and soil temperature calculations
#' * Institution: INRA/LEPSE Montpellier
#' * ExtendedDescription: It is calculated from net Radiation, soil heat flux and potential transpiration
#' * ShortDescription: It calculates the crop heat flux
#'
#'- inputs:
#' * name: netRadiationEquivalentEvaporation
#' ** variablecategory : auxiliary
#' ** description : net Radiation Equivalent Evaporation
#' ** datatype : DOUBLE
#' ** default : 638.142
#' ** min : 0
#' ** max : 10000
#' ** unit : g m-2 d-1
#' ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
#' ** inputtype : variable
#' * name: soilHeatFlux
#' ** description : soil Heat Flux
#' ** variablecategory : rate
#' ** datatype : DOUBLE
#' ** default : 188.817
#' ** min : 0
#' ** max : 1000
#' ** unit : g m-2 d-1
#' ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
#' ** inputtype : variable
#' * name: potentialTranspiration
#' ** description : potential Transpiration
#' ** variablecategory : rate
#' ** datatype : DOUBLE
#' ** default : 1.413
#' ** min : 0
#' ** max : 1000
#' ** unit : g m-2 d-1
#' ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
#' ** inputtype : variable
#'- outputs:
#' * name: cropHeatFlux
#' ** description : crop Heat Flux
#' ** variablecategory : rate
#' ** datatype : DOUBLE
#' ** min : 0
#' ** max : 10000
#' ** unit : g m-2 d-1
#' ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
cropHeatFlux <- netRadiationEquivalentEvaporation - soilHeatFlux - potentialTranspiration
return (list('cropHeatFlux' = cropHeatFlux))
} |
\name{be.zeroinfl}
\alias{be.zeroinfl}
\title{conduct backward stepwise variable elimination for zero inflated count regression}
\description{
Conducts backward stepwise variable elimination for zero-inflated count regression models fitted by the \code{zeroinfl} function.}
\usage{
be.zeroinfl(object, data, dist=c("poisson", "negbin", "geometric"), alpha=0.05,
trace=FALSE)
}
\arguments{
\item{object}{an object from function zeroinfl}
\item{data}{argument controlling formula processing
via \code{\link[stats]{model.frame}}.}
\item{dist}{one of the distributions in zeroinfl function}
\item{alpha}{significance level of variable elimination}
\item{trace}{logical value, if TRUE, print detailed calculation results}
}
\details{
Conducts backward stepwise variable elimination for zero-inflated count regression models fitted by the \code{zeroinfl} function.}
\value{an object of zeroinfl with all variables having p-values less than the significance level \code{alpha}}
\references{
Zhu Wang, Shuangge Ma, Ching-Yun Wang, Michael Zappitelli, Prasad Devarajan and Chirag R. Parikh (2014)
\emph{EM for Regularized Zero Inflated Regression Models with Applications to Postoperative Morbidity after Cardiac Surgery in Children}, \emph{Statistics in Medicine}. 33(29):5192-208.
Zhu Wang, Shuangge Ma and Ching-Yun Wang (2015) \emph{Variable selection for zero-inflated and overdispersed data with application to health care demand in Germany}, \emph{Biometrical Journal}. 57(5):867-84.
}
\author{Zhu Wang <wangz1@uthscsa.edu>}
\keyword{models}
\keyword{regression}
| /man/be_zeroinfl.Rd | no_license | zhuwang46/mpath | R | false | false | 1,550 | rd | \name{be.zeroinfl}
\alias{be.zeroinfl}
\title{conduct backward stepwise variable elimination for zero inflated count regression}
\description{
conduct backward stepwise variable elimination for zero inflated count regression from zeroinfl function}
\usage{
be.zeroinfl(object, data, dist=c("poisson", "negbin", "geometric"), alpha=0.05,
trace=FALSE)
}
\arguments{
\item{object}{an object from function zeroinfl}
\item{data}{argument controlling formula processing
via \code{\link[stats]{model.frame}}.}
\item{dist}{one of the distributions in zeroinfl function}
\item{alpha}{significance level of variable elimination}
\item{trace}{logical value, if TRUE, print detailed calculation results}
}
\details{
conduct backward stepwise variable elimination for zero inflated count regression from zeroinfl function}
\value{an object of zeroinfl with all variables having p-values less than the significance level \code{alpha}}
\references{
Zhu Wang, Shuangge Ma, Ching-Yun Wang, Michael Zappitelli, Prasad Devarajan and Chirag R. Parikh (2014)
\emph{EM for Regularized Zero Inflated Regression Models with Applications to Postoperative Morbidity after Cardiac Surgery in Children}, \emph{Statistics in Medicine}. 33(29):5192-208.
Zhu Wang, Shuangge Ma and Ching-Yun Wang (2015) \emph{Variable selection for zero-inflated and overdispersed data with application to health care demand in Germany}, \emph{Biometrical Journal}. 57(5):867-84.
}
\author{Zhu Wang <wangz1@uthscsa.edu>}
\keyword{models}
\keyword{regression}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getGeneList.R
\name{getGeneList}
\alias{getGeneList}
\title{Specify genes of interest for retrieving genomic cancer data; genes must be given as gene symbols, one per line.}
\usage{
getGeneList()
}
\value{
Gene list path of file
}
\description{
Prompts the user to specify which genes are of interest when retrieving genomic cancer data. Genes must be given as gene symbols, one gene per line.
}
\examples{
myGlobalEnv <- new.env(parent = emptyenv())
\dontrun{
getGeneList()
}
}
| /man/getGeneList.Rd | no_license | kmezhoud/canceR | R | false | true | 550 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getGeneList.R
\name{getGeneList}
\alias{getGeneList}
\title{Specify genes of interest for retrieving genomic cancer data; genes must be given as gene symbols, one per line.}
\usage{
getGeneList()
}
\value{
Gene list path of file
}
\description{
Prompts the user to specify which genes are of interest when retrieving genomic cancer data. Genes must be given as gene symbols, one gene per line.
}
\examples{
myGlobalEnv <- new.env(parent = emptyenv())
\dontrun{
getGeneList()
}
}
|
#' Load and preprocess microarray data
#'
#' Loads, preprocesses and annotates microarray data to be further used by
#' downstream functions in the \pkg{\link{piano}} package.
#'
#' This function requires at least two inputs: (1) data, either CEL files in
#' the directory specified by \code{datadir} or normalized data specified by
#' \code{dataNorm}, and (2) experimental setup specified by \code{setup}.
#'
#' The setup should be either a tab delimited text file with column headers or a
#' \code{data.frame}. The first column should contain the names of the CEL
#' files or the column names used for the normalized data, please be sure to
#' use names valid as column names, e.g. avoid names starting with numbers.
#' Additional columns should assign attributes in some category to each array.
#' (For an example run the example below and look at the object
#' \code{myArrayData$setup}.)
#'
#' The \pkg{piano} package is customized for yeast 2.0 arrays and annotation
#' will work automatically, if the cdfName of the arrays equals \emph{Yeast_2}.
#' If using normalized yeast 2.0 data as input, the user needs to set the
#' argument \code{platform="yeast2"} to tell the function to use yeast
#' annotation. If other platforms than yeast 2.0 is used, set
#' \code{platform=NULL} (default) and supply appropriate annotation by the
#' argument \code{annotation}. Note that the cdfName will override
#' \code{platform}, so it can still be set to \code{NULL} for yeast 2.0 CEL
#' files. Note also that \code{annotation} overrides \code{platform}, so if the
#' user wants to use an alternative annotation for yeast, this can be done
#' simply by specifying this in \code{annotation}.
#'
#' The annotation should have the column headers \emph{Gene name},
#' \emph{Chromosome} and \emph{Chromosome location}. The \emph{Gene name} is
#' used in the heatmap in \code{diffExp} and the \emph{Chromosome} and
#' \emph{Chromosome location} is used by the \code{polarPlot}. The rownames (or
#' first column if using a text file) should contain the \emph{probe IDs}. If
#' using a text file the first column should have the header \emph{probeID} or
#' similar. The filtering step discards all probes not listed in the
#' annotation.
#'
#' Normalization is performed on all CEL file data using one of the Affymetrix
#' methods: PLIER (\code{"plier"}) as implemented by
#' \code{\link[plier:justPlier]{justPlier}}, RMA (Robust Multi-Array Average)
#' (\code{"rma"}) expression measure as implemented by
#' \code{\link[affy:rma]{rma}} or MAS 5.0 expression measure \code{"mas5"} as
#' implemented by \code{\link[affy:mas5]{mas5}}.
#'
#' It is possible to pass additional arguments to
#' \code{\link[affy:read.affybatch]{ReadAffy}}, e.g. \code{cdfname} as this
#' might be required for some types of CEL files.
#'
#' @param datadir character string giving the directory in which to look for
#' the data. Defaults to \code{getwd()}.
#' @param setup character string giving the name of the file containing the
#' experimental setup, or an object of class \code{data.frame} or similar
#' containing the experimental setup. Defaults to \code{"setup.txt"}, see
#' details below for more information.
#' @param dataNorm character string giving the name of the normalized data, or
#' an object of class \code{data.frame} or similar containing the normalized
#' data. Only to be used if the user wishes to start with normalized data
#' rather than CEL files.
#' @param platform character string giving the name of the platform, can be
#' either \code{"yeast2"} or \code{NULL}. See details below for more
#' information.
#' @param annotation character string giving the name of the annotation file,
#' or an object of class \code{data.frame} or similar containing the annotation
#' information. The annotation should consist of the columns \emph{Gene name},
#' \emph{Chromosome} and \emph{Chromosome location}. Not required if
#' \code{platform="yeast2"}.
#' @param normalization character string giving the normalization method, can
#' be either \code{"plier"}, \code{"rma"} or \code{"mas5"}. Defaults to
#' \code{"plier"}.
#' @param filter should the data be filtered? If \code{TRUE} then probes not
#' present in the annotation will be discarded. Defaults to \code{TRUE}.
#' @param verbose verbose? Defaults to \code{TRUE}.
#' @param \dots additional arguments to be passed to \code{ReadAffy}.
#' @return An \code{ArrayData} object (which is essentially a \code{list}) with
#' the following elements:
#'
#' \item{dataRaw}{raw data as an AffyBatch object}
#' \item{dataNorm}{\code{data.frame} containing normalized expression values}
#' \item{setup}{\code{data.frame} containing experimental setup}
#' \item{annotation}{\code{data.frame} containing annotation}
#'
#' Depending on input arguments the \code{ArrayData} object may not include
#' \code{dataRaw} and/or \code{annotation}.
#' @author Leif Varemo \email{piano.rpkg@@gmail.com} and Intawat Nookaew
#' \email{piano.rpkg@@gmail.com}
#' @seealso \pkg{\link{piano}}, \code{\link{runQC}}, \code{\link{diffExp}},
#' \code{\link[affy:read.affybatch]{ReadAffy}},
#' \code{\link[affy:expresso]{expresso}},
#' \code{\link[plier:justPlier]{justPlier}}, \code{\link[yeast2.db:yeast2BASE]{yeast2.db}}
#' @references Gautier, L., Cope, L., Bolstad, B. M., and Irizarry, R. A. affy
#' - analysis of Affymetrix GeneChip data at the probe level.
#' \emph{Bioinformatics.} \bold{20}, 3, 307-315 (2004).
#' @examples
#'
#' # Get path to example data and setup files:
#' dataPath <- system.file("extdata", package="piano")
#'
#' # Load normalized data:
#' myArrayData <- loadMAdata(datadir=dataPath, dataNorm="norm_data.txt.gz", platform="yeast2")
#'
#' # Print to look at details:
#' myArrayData
#'
#'
loadMAdata <- function(datadir=getwd(), setup="setup.txt", dataNorm,
                       platform="NULL", annotation, normalization="plier",
                       filter=TRUE, verbose=TRUE, ...) {

  # Dependencies are checked with requireNamespace() (not require()) so that
  # a missing package raises an error instead of silently returning FALSE:
  if (!requireNamespace("affy", quietly = TRUE)) stop("package affy is missing")
  if (!requireNamespace("plier", quietly = TRUE)) stop("package plier is missing")

  # FIX: the documented default for 'platform' is NULL, but every comparison
  # below is against character values; a literal NULL used to fail with
  # "argument is of length zero". Map NULL onto the sentinel string "NULL"
  # so that both platform=NULL and platform="NULL" are accepted:
  if (is.null(platform)) platform <- "NULL"

  # Argument check:
  if (!normalization %in% c("plier", "rma", "mas5")) {
    stop("incorrect value of argument normalization")
  }
  if (!platform %in% c("NULL", "yeast2")) {
    stop("incorrect value of argument platform")
  }

  # Helper that emits progress messages only when verbose=TRUE:
  .verb <- function(mes, verbose) {
    if (verbose == TRUE) {
      message(mes)
    }
  }

  # Load the data: prefer CEL files found in 'datadir' unless normalized
  # data was supplied explicitly through 'dataNorm'.
  nCelFiles <- length(dir(path = datadir, pattern = ".*cel", all.files = FALSE,
                          full.names = FALSE, ignore.case = TRUE, recursive = FALSE))
  if (nCelFiles > 0 && missing(dataNorm)) {
    # Load CEL files into an AffyBatch:
    .verb("Loading CEL files...", verbose)
    dataRaw <- affy::ReadAffy(celfile.path = datadir, ...)
    # Strip ".CEL"/".gz" suffixes so sample names match the setup file:
    colnames(exprs(dataRaw)) <- gsub("\\.CEL", "", colnames(exprs(dataRaw)), ignore.case = TRUE)
    colnames(exprs(dataRaw)) <- gsub("\\.gz", "", colnames(exprs(dataRaw)), ignore.case = TRUE)
    if (sum(duplicated(colnames(exprs(dataRaw)))) > 0) stop("found samples with identical names")
    .verb("...done", verbose)
  } else if (!missing(dataNorm)) {
    if (is(dataNorm, "character")) {
      # 'dataNorm' is a file name: read the tab-delimited text file.
      dataFilePath <- paste(datadir, "/", dataNorm, sep = "")
      if (!file.exists(dataFilePath)) {
        stop("could not find the data file")
      }
      .verb("Loading data in text file...", verbose)
      dataNorm <- as.data.frame(read.delim(dataFilePath, header = TRUE, sep = "\t",
                                           row.names = 1, as.is = TRUE, quote = ""),
                                stringsAsFactors = FALSE)
      colnames(dataNorm) <- gsub("\\.CEL", "", colnames(dataNorm), ignore.case = TRUE)
      colnames(dataNorm) <- gsub("\\.gz", "", colnames(dataNorm), ignore.case = TRUE)
      if (sum(duplicated(colnames(dataNorm))) > 0) stop("found samples with identical names")
      .verb("...done", verbose)
    } else {
      # 'dataNorm' is already a data.frame-like object:
      dataNorm <- as.data.frame(dataNorm, stringsAsFactors = FALSE)
    }
  } else {
    stop("could not find any data files in directory")
  }
  # At this point either 'dataRaw' (CEL input) or 'dataNorm' (text input)
  # exists, depending on the input type.

  # Load the setup (file name or data.frame-like object):
  if (is(setup, "character")) {
    setupFilePath <- paste(datadir, "/", setup, sep = "")
    if (!file.exists(setupFilePath)) {
      stop("could not find the setup file")
    }
    .verb("Loading setup file...", verbose)
    setup <- as.data.frame(read.delim(setupFilePath, header = TRUE, sep = "\t",
                                      row.names = 1, as.is = TRUE, quote = ""),
                           stringsAsFactors = FALSE)
    rownames(setup) <- gsub("\\.CEL", "", rownames(setup), ignore.case = TRUE)
    rownames(setup) <- gsub("\\.gz", "", rownames(setup), ignore.case = TRUE)
    .verb("...done", verbose)
  } else {
    setup <- as.data.frame(setup, stringsAsFactors = FALSE)
  }

  # Normalize the raw data (only when CEL files were loaded):
  if (exists("dataRaw", inherits = FALSE)) {
    if (normalization == "plier") {
      .verb("Preprocessing using PLIER with cubic spline normalization...", verbose)
      dataNorm <- affy::normalize.AffyBatch.qspline(dataRaw, type = "pmonly", verbose = FALSE)
      # justPlier prints progress; capture and discard its output.
      # (FIX: removed the redundant double assignment 'tmp <- ... tmp <- ...')
      tmp <- suppressWarnings(capture.output(
        dataNorm <- plier::justPlier(dataNorm, normalize = FALSE,
                                     usemm = FALSE, concpenalty = 0.08,
                                     plieriteration = 30000)))
      dataNorm <- as.data.frame(exprs(dataNorm), stringsAsFactors = FALSE)
      colnames(dataNorm) <- gsub("\\.CEL", "", colnames(dataNorm), ignore.case = TRUE)
      colnames(dataNorm) <- gsub("\\.gz", "", colnames(dataNorm), ignore.case = TRUE)
      .verb("...done", verbose)
    } else if (normalization == "rma") {
      .verb("Preprocessing using RMA with quantile normalization...", verbose)
      dataNorm <- affy::rma(dataRaw, verbose = FALSE)
      dataNorm <- as.data.frame(exprs(dataNorm), stringsAsFactors = FALSE)
      colnames(dataNorm) <- gsub("\\.CEL", "", colnames(dataNorm), ignore.case = TRUE)
      colnames(dataNorm) <- gsub("\\.gz", "", colnames(dataNorm), ignore.case = TRUE)
      .verb("...done", verbose)
    } else if (normalization == "mas5") {
      .verb("Preprocessing using MAS 5.0 with quantile normalization...", verbose)
      dataNorm <- affy::mas5(dataRaw, verbose = FALSE)
      # MAS 5.0 values are on a linear scale; log2-transform for consistency:
      dataNorm <- as.data.frame(log2(exprs(dataNorm)), stringsAsFactors = FALSE)
      colnames(dataNorm) <- gsub("\\.CEL", "", colnames(dataNorm), ignore.case = TRUE)
      colnames(dataNorm) <- gsub("\\.gz", "", colnames(dataNorm), ignore.case = TRUE)
      .verb("...done", verbose)
    }
  } else {
    .verb("Text file data: No normalization performed.", verbose)
  }

  # Determine which annotation to use. The CEL files' cdfName overrides the
  # 'platform' argument, and an explicit 'annotation' overrides 'platform'.
  if (exists("dataRaw", inherits = FALSE)) {
    if (affy::cdfName(dataRaw) == "Yeast_2") {
      platform <- "yeast2"
    }
  }
  if (platform == "yeast2" && missing(annotation)) {
    annotationInfo <- "yeast2"
  } else if (!missing(annotation)) {
    annotationInfo <- "asArgument"
  } else {
    annotationInfo <- "none"
  }
  if (annotationInfo != "none") {
    if (annotationInfo == "yeast2") {
      if (!requireNamespace("yeast2.db", quietly = TRUE)) stop("package yeast2.db is missing")
      if (!requireNamespace("AnnotationDbi", quietly = TRUE)) stop("package AnnotationDbi is missing")
      # Annotate the probes using the yeast2.db package:
      .verb("Creating annotation...", verbose)
      # Gene names:
      geneName <- yeast2.db::yeast2ORF
      geneName <- AnnotationDbi::toTable(geneName)
      # Chromosome locations (columns reordered to probe, chromosome, start):
      chromosome <- yeast2.db::yeast2CHRLOC
      chromosome <- AnnotationDbi::toTable(chromosome)
      chromosome <- chromosome[, c(1, 3, 2)]
      # Probe IDs corresponding to those in dataNorm:
      probeID <- as.data.frame(rownames(dataNorm), stringsAsFactors = FALSE)
      colnames(probeID) <- "probeID"
      # Assemble the annotation data frame:
      annot <- merge(probeID, geneName, by.x = "probeID", by.y = "probe_id", all.x = TRUE)
      annot <- merge(annot, chromosome, by.x = "probeID", by.y = "probe_id", all.x = TRUE)
      rownames(annot) <- annot$probeID
      annot <- annot[2:ncol(annot)]
      colnames(annot) <- c("geneName", "chromosome", "start")
      .verb("...done", verbose)
    } else if (annotationInfo == "asArgument") {
      # Annotation supplied by the user, either as a file name or an object:
      if (is(annotation, "character")) {
        .verb("Creating annotation...", verbose)
        annotFilePath <- paste(datadir, "/", annotation, sep = "")
        if (!file.exists(annotFilePath)) {
          stop("could not find the annotation file")
        }
        annot <- as.data.frame(read.delim(annotFilePath, header = TRUE, sep = "\t",
                                          row.names = 1, as.is = TRUE, quote = ""),
                               stringsAsFactors = FALSE)
        if (ncol(annot) != 3) {
          stop("provided annotation has to contain 3 columns")
        }
        colnames(annot) <- c("geneName", "chromosome", "start")
        .verb("...done", verbose)
      } else {
        annot <- annotation
        annot[, 1] <- as.character(annotation[, 1])
        annot[, 2] <- as.character(annotation[, 2])
      }
      # The chromosome location column must be numeric (coercion NAs = failure):
      suppressWarnings(tmp <- as.numeric(as.character(annot[, 3])))
      if (!all(!is.na(tmp))) stop("the chromosome location in annotation has to be numerical")
      annot[, 3] <- tmp
      # Remove mappings not present in the data:
      annot <- annot[rownames(annot) %in% rownames(dataNorm), ]
      # Check for duplicated probe IDs:
      if (length(rownames(annot)) != length(unique(rownames(annot)))) {
        stop("the annotation contains Gene name duplicates")
      }
    }
  } else {
    warning("no annotation created, may cause limitation in downstream functions")
  }

  # Optionally filter out probes that have no annotation mapping:
  if (filter == TRUE && exists("annot", inherits = FALSE)) {
    .verb("Removing unmapped probes...", verbose)
    mappedProbes <- rownames(annot)[!is.na(annot[, 1])]
    probes <- rownames(dataNorm)
    dataNorm <- dataNorm[probes %in% mappedProbes, ]
    # Remove mappings not present in the (now filtered) data:
    annot <- annot[rownames(annot) %in% rownames(dataNorm), ]
    .verb("..done", verbose)
  } else if (filter == TRUE && !exists("annot", inherits = FALSE)) {
    warning("annotation required for filtering, filtering step is omitted")
  }

  # Check sample name consistency between setup rows and data columns:
  tmp1 <- length(rownames(setup))
  tmp2 <- length(colnames(dataNorm))
  tmp3 <- length(c(1:tmp1)[rownames(setup) %in% colnames(dataNorm)])
  tmp4 <- length(c(1:tmp2)[colnames(dataNorm) %in% rownames(setup)])
  if (tmp1 != tmp2 || tmp3 != tmp4) {
    stop("inconsistent sample names in dataNorm and setup")  # FIX: typo "inconsistant"
  }

  # Construct the ArrayData return object; include dataRaw/annotation only
  # when they were actually created above:
  if (exists("dataRaw", inherits = FALSE) && exists("annot", inherits = FALSE)) {
    arrayData <- list(dataRaw = dataRaw, dataNorm = dataNorm, setup = setup, annotation = annot)
  } else if (exists("annot", inherits = FALSE)) {
    arrayData <- list(dataNorm = dataNorm, setup = setup, annotation = annot)
  } else if (exists("dataRaw", inherits = FALSE)) {
    arrayData <- list(dataRaw = dataRaw, dataNorm = dataNorm, setup = setup)
  } else {
    arrayData <- list(dataNorm = dataNorm, setup = setup)
  }
  class(arrayData) <- "ArrayData"
  return(arrayData)
}
| /R/loadMAdata.r | no_license | varemo/piano | R | false | false | 15,456 | r | #' Load and preprocess microarray data
#'
#' Loads, preprocesses and annotates microarray data to be further used by
#' downstream functions in the \pkg{\link{piano}} package.
#'
#' This function requires at least two inputs: (1) data, either CEL files in
#' the directory specified by \code{datadir} or normalized data specified by
#' \code{dataNorm}, and (2) experimental setup specified by \code{setup}.
#'
#' The setup should be either a tab delimited text file with column headers or a
#' \code{data.frame}. The first column should contain the names of the CEL
#' files or the column names used for the normalized data, please be sure to
#' use names valid as column names, e.g. avoid names starting with numbers.
#' Additional columns should assign attributes in some category to each array.
#' (For an example run the example below and look at the object
#' \code{myArrayData$setup}.)
#'
#' The \pkg{piano} package is customized for yeast 2.0 arrays and annotation
#' will work automatically, if the cdfName of the arrays equals \emph{Yeast_2}.
#' If using normalized yeast 2.0 data as input, the user needs to set the
#' argument \code{platform="yeast2"} to tell the function to use yeast
#' annotation. If other platforms than yeast 2.0 is used, set
#' \code{platform=NULL} (default) and supply appropriate annotation by the
#' argument \code{annotation}. Note that the cdfName will override
#' \code{platform}, so it can still be set to \code{NULL} for yeast 2.0 CEL
#' files. Note also that \code{annotation} overrides \code{platform}, so if the
#' user wants to use an alternative annotation for yeast, this can be done
#' simply by specifying this in \code{annotation}.
#'
#' The annotation should have the column headers \emph{Gene name},
#' \emph{Chromosome} and \emph{Chromosome location}. The \emph{Gene name} is
#' used in the heatmap in \code{diffExp} and the \emph{Chromosome} and
#' \emph{Chromosome location} is used by the \code{polarPlot}. The rownames (or
#' first column if using a text file) should contain the \emph{probe IDs}. If
#' using a text file the first column should have the header \emph{probeID} or
#' similar. The filtering step discards all probes not listed in the
#' annotation.
#'
#' Normalization is performed on all CEL file data using one of the Affymetrix
#' methods: PLIER (\code{"plier"}) as implemented by
#' \code{\link[plier:justPlier]{justPlier}}, RMA (Robust Multi-Array Average)
#' (\code{"rma"}) expression measure as implemented by
#' \code{\link[affy:rma]{rma}} or MAS 5.0 expression measure \code{"mas5"} as
#' implemented by \code{\link[affy:mas5]{mas5}}.
#'
#' It is possible to pass additional arguments to
#' \code{\link[affy:read.affybatch]{ReadAffy}}, e.g. \code{cdfname} as this
#' might be required for some types of CEL files.
#'
#' @param datadir character string giving the directory in which to look for
#' the data. Defaults to \code{getwd()}.
#' @param setup character string giving the name of the file containing the
#' experimental setup, or an object of class \code{data.frame} or similar
#' containing the experimental setup. Defaults to \code{"setup.txt"}, see
#' details below for more information.
#' @param dataNorm character string giving the name of the normalized data, or
#' an object of class \code{data.frame} or similar containing the normalized
#' data. Only to be used if the user wishes to start with normalized data
#' rather than CEL files.
#' @param platform character string giving the name of the platform, can be
#' either \code{"yeast2"} or \code{NULL}. See details below for more
#' information.
#' @param annotation character string giving the name of the annotation file,
#' or an object of class \code{data.frame} or similar containing the annotation
#' information. The annotation should consist of the columns \emph{Gene name},
#' \emph{Chromosome} and \emph{Chromosome location}. Not required if
#' \code{platform="yeast2"}.
#' @param normalization character string giving the normalization method, can
#' be either \code{"plier"}, \code{"rma"} or \code{"mas5"}. Defaults to
#' \code{"plier"}.
#' @param filter should the data be filtered? If \code{TRUE} then probes not
#' present in the annotation will be discarded. Defaults to \code{TRUE}.
#' @param verbose verbose? Defaults to \code{TRUE}.
#' @param \dots additional arguments to be passed to \code{ReadAffy}.
#' @return An \code{ArrayData} object (which is essentially a \code{list}) with
#' the following elements:
#'
#' \item{dataRaw}{raw data as an AffyBatch object}
#' \item{dataNorm}{\code{data.frame} containing normalized expression values}
#' \item{setup}{\code{data.frame} containing experimental setup}
#' \item{annotation}{\code{data.frame} containing annotation}
#'
#' Depending on input arguments the \code{ArrayData} object may not include
#' \code{dataRaw} and/or \code{annotation}.
#' @author Leif Varemo \email{piano.rpkg@@gmail.com} and Intawat Nookaew
#' \email{piano.rpkg@@gmail.com}
#' @seealso \pkg{\link{piano}}, \code{\link{runQC}}, \code{\link{diffExp}},
#' \code{\link[affy:read.affybatch]{ReadAffy}},
#' \code{\link[affy:expresso]{expresso}},
#' \code{\link[plier:justPlier]{justPlier}}, \code{\link[yeast2.db:yeast2BASE]{yeast2.db}}
#' @references Gautier, L., Cope, L., Bolstad, B. M., and Irizarry, R. A. affy
#' - analysis of Affymetrix GeneChip data at the probe level.
#' \emph{Bioinformatics.} \bold{20}, 3, 307-315 (2004).
#' @examples
#'
#' # Get path to example data and setup files:
#' dataPath <- system.file("extdata", package="piano")
#'
#' # Load normalized data:
#' myArrayData <- loadMAdata(datadir=dataPath, dataNorm="norm_data.txt.gz", platform="yeast2")
#'
#' # Print to look at details:
#' myArrayData
#'
#'
loadMAdata <- function(datadir=getwd(), setup="setup.txt", dataNorm,
                       platform="NULL", annotation, normalization="plier",
                       filter=TRUE, verbose=TRUE, ...) {

  # Dependencies are checked with requireNamespace() (not require()) so that
  # a missing package raises an error instead of silently returning FALSE:
  if (!requireNamespace("affy", quietly = TRUE)) stop("package affy is missing")
  if (!requireNamespace("plier", quietly = TRUE)) stop("package plier is missing")

  # FIX: the documented default for 'platform' is NULL, but every comparison
  # below is against character values; a literal NULL used to fail with
  # "argument is of length zero". Map NULL onto the sentinel string "NULL"
  # so that both platform=NULL and platform="NULL" are accepted:
  if (is.null(platform)) platform <- "NULL"

  # Argument check:
  if (!normalization %in% c("plier", "rma", "mas5")) {
    stop("incorrect value of argument normalization")
  }
  if (!platform %in% c("NULL", "yeast2")) {
    stop("incorrect value of argument platform")
  }

  # Helper that emits progress messages only when verbose=TRUE:
  .verb <- function(mes, verbose) {
    if (verbose == TRUE) {
      message(mes)
    }
  }

  # Load the data: prefer CEL files found in 'datadir' unless normalized
  # data was supplied explicitly through 'dataNorm'.
  nCelFiles <- length(dir(path = datadir, pattern = ".*cel", all.files = FALSE,
                          full.names = FALSE, ignore.case = TRUE, recursive = FALSE))
  if (nCelFiles > 0 && missing(dataNorm)) {
    # Load CEL files into an AffyBatch:
    .verb("Loading CEL files...", verbose)
    dataRaw <- affy::ReadAffy(celfile.path = datadir, ...)
    # Strip ".CEL"/".gz" suffixes so sample names match the setup file:
    colnames(exprs(dataRaw)) <- gsub("\\.CEL", "", colnames(exprs(dataRaw)), ignore.case = TRUE)
    colnames(exprs(dataRaw)) <- gsub("\\.gz", "", colnames(exprs(dataRaw)), ignore.case = TRUE)
    if (sum(duplicated(colnames(exprs(dataRaw)))) > 0) stop("found samples with identical names")
    .verb("...done", verbose)
  } else if (!missing(dataNorm)) {
    if (is(dataNorm, "character")) {
      # 'dataNorm' is a file name: read the tab-delimited text file.
      dataFilePath <- paste(datadir, "/", dataNorm, sep = "")
      if (!file.exists(dataFilePath)) {
        stop("could not find the data file")
      }
      .verb("Loading data in text file...", verbose)
      dataNorm <- as.data.frame(read.delim(dataFilePath, header = TRUE, sep = "\t",
                                           row.names = 1, as.is = TRUE, quote = ""),
                                stringsAsFactors = FALSE)
      colnames(dataNorm) <- gsub("\\.CEL", "", colnames(dataNorm), ignore.case = TRUE)
      colnames(dataNorm) <- gsub("\\.gz", "", colnames(dataNorm), ignore.case = TRUE)
      if (sum(duplicated(colnames(dataNorm))) > 0) stop("found samples with identical names")
      .verb("...done", verbose)
    } else {
      # 'dataNorm' is already a data.frame-like object:
      dataNorm <- as.data.frame(dataNorm, stringsAsFactors = FALSE)
    }
  } else {
    stop("could not find any data files in directory")
  }
  # At this point either 'dataRaw' (CEL input) or 'dataNorm' (text input)
  # exists, depending on the input type.

  # Load the setup (file name or data.frame-like object):
  if (is(setup, "character")) {
    setupFilePath <- paste(datadir, "/", setup, sep = "")
    if (!file.exists(setupFilePath)) {
      stop("could not find the setup file")
    }
    .verb("Loading setup file...", verbose)
    setup <- as.data.frame(read.delim(setupFilePath, header = TRUE, sep = "\t",
                                      row.names = 1, as.is = TRUE, quote = ""),
                           stringsAsFactors = FALSE)
    rownames(setup) <- gsub("\\.CEL", "", rownames(setup), ignore.case = TRUE)
    rownames(setup) <- gsub("\\.gz", "", rownames(setup), ignore.case = TRUE)
    .verb("...done", verbose)
  } else {
    setup <- as.data.frame(setup, stringsAsFactors = FALSE)
  }

  # Normalize the raw data (only when CEL files were loaded):
  if (exists("dataRaw", inherits = FALSE)) {
    if (normalization == "plier") {
      .verb("Preprocessing using PLIER with cubic spline normalization...", verbose)
      dataNorm <- affy::normalize.AffyBatch.qspline(dataRaw, type = "pmonly", verbose = FALSE)
      # justPlier prints progress; capture and discard its output.
      # (FIX: removed the redundant double assignment 'tmp <- ... tmp <- ...')
      tmp <- suppressWarnings(capture.output(
        dataNorm <- plier::justPlier(dataNorm, normalize = FALSE,
                                     usemm = FALSE, concpenalty = 0.08,
                                     plieriteration = 30000)))
      dataNorm <- as.data.frame(exprs(dataNorm), stringsAsFactors = FALSE)
      colnames(dataNorm) <- gsub("\\.CEL", "", colnames(dataNorm), ignore.case = TRUE)
      colnames(dataNorm) <- gsub("\\.gz", "", colnames(dataNorm), ignore.case = TRUE)
      .verb("...done", verbose)
    } else if (normalization == "rma") {
      .verb("Preprocessing using RMA with quantile normalization...", verbose)
      dataNorm <- affy::rma(dataRaw, verbose = FALSE)
      dataNorm <- as.data.frame(exprs(dataNorm), stringsAsFactors = FALSE)
      colnames(dataNorm) <- gsub("\\.CEL", "", colnames(dataNorm), ignore.case = TRUE)
      colnames(dataNorm) <- gsub("\\.gz", "", colnames(dataNorm), ignore.case = TRUE)
      .verb("...done", verbose)
    } else if (normalization == "mas5") {
      .verb("Preprocessing using MAS 5.0 with quantile normalization...", verbose)
      dataNorm <- affy::mas5(dataRaw, verbose = FALSE)
      # MAS 5.0 values are on a linear scale; log2-transform for consistency:
      dataNorm <- as.data.frame(log2(exprs(dataNorm)), stringsAsFactors = FALSE)
      colnames(dataNorm) <- gsub("\\.CEL", "", colnames(dataNorm), ignore.case = TRUE)
      colnames(dataNorm) <- gsub("\\.gz", "", colnames(dataNorm), ignore.case = TRUE)
      .verb("...done", verbose)
    }
  } else {
    .verb("Text file data: No normalization performed.", verbose)
  }

  # Determine which annotation to use. The CEL files' cdfName overrides the
  # 'platform' argument, and an explicit 'annotation' overrides 'platform'.
  if (exists("dataRaw", inherits = FALSE)) {
    if (affy::cdfName(dataRaw) == "Yeast_2") {
      platform <- "yeast2"
    }
  }
  if (platform == "yeast2" && missing(annotation)) {
    annotationInfo <- "yeast2"
  } else if (!missing(annotation)) {
    annotationInfo <- "asArgument"
  } else {
    annotationInfo <- "none"
  }
  if (annotationInfo != "none") {
    if (annotationInfo == "yeast2") {
      if (!requireNamespace("yeast2.db", quietly = TRUE)) stop("package yeast2.db is missing")
      if (!requireNamespace("AnnotationDbi", quietly = TRUE)) stop("package AnnotationDbi is missing")
      # Annotate the probes using the yeast2.db package:
      .verb("Creating annotation...", verbose)
      # Gene names:
      geneName <- yeast2.db::yeast2ORF
      geneName <- AnnotationDbi::toTable(geneName)
      # Chromosome locations (columns reordered to probe, chromosome, start):
      chromosome <- yeast2.db::yeast2CHRLOC
      chromosome <- AnnotationDbi::toTable(chromosome)
      chromosome <- chromosome[, c(1, 3, 2)]
      # Probe IDs corresponding to those in dataNorm:
      probeID <- as.data.frame(rownames(dataNorm), stringsAsFactors = FALSE)
      colnames(probeID) <- "probeID"
      # Assemble the annotation data frame:
      annot <- merge(probeID, geneName, by.x = "probeID", by.y = "probe_id", all.x = TRUE)
      annot <- merge(annot, chromosome, by.x = "probeID", by.y = "probe_id", all.x = TRUE)
      rownames(annot) <- annot$probeID
      annot <- annot[2:ncol(annot)]
      colnames(annot) <- c("geneName", "chromosome", "start")
      .verb("...done", verbose)
    } else if (annotationInfo == "asArgument") {
      # Annotation supplied by the user, either as a file name or an object:
      if (is(annotation, "character")) {
        .verb("Creating annotation...", verbose)
        annotFilePath <- paste(datadir, "/", annotation, sep = "")
        if (!file.exists(annotFilePath)) {
          stop("could not find the annotation file")
        }
        annot <- as.data.frame(read.delim(annotFilePath, header = TRUE, sep = "\t",
                                          row.names = 1, as.is = TRUE, quote = ""),
                               stringsAsFactors = FALSE)
        if (ncol(annot) != 3) {
          stop("provided annotation has to contain 3 columns")
        }
        colnames(annot) <- c("geneName", "chromosome", "start")
        .verb("...done", verbose)
      } else {
        annot <- annotation
        annot[, 1] <- as.character(annotation[, 1])
        annot[, 2] <- as.character(annotation[, 2])
      }
      # The chromosome location column must be numeric (coercion NAs = failure):
      suppressWarnings(tmp <- as.numeric(as.character(annot[, 3])))
      if (!all(!is.na(tmp))) stop("the chromosome location in annotation has to be numerical")
      annot[, 3] <- tmp
      # Remove mappings not present in the data:
      annot <- annot[rownames(annot) %in% rownames(dataNorm), ]
      # Check for duplicated probe IDs:
      if (length(rownames(annot)) != length(unique(rownames(annot)))) {
        stop("the annotation contains Gene name duplicates")
      }
    }
  } else {
    warning("no annotation created, may cause limitation in downstream functions")
  }

  # Optionally filter out probes that have no annotation mapping:
  if (filter == TRUE && exists("annot", inherits = FALSE)) {
    .verb("Removing unmapped probes...", verbose)
    mappedProbes <- rownames(annot)[!is.na(annot[, 1])]
    probes <- rownames(dataNorm)
    dataNorm <- dataNorm[probes %in% mappedProbes, ]
    # Remove mappings not present in the (now filtered) data:
    annot <- annot[rownames(annot) %in% rownames(dataNorm), ]
    .verb("..done", verbose)
  } else if (filter == TRUE && !exists("annot", inherits = FALSE)) {
    warning("annotation required for filtering, filtering step is omitted")
  }

  # Check sample name consistency between setup rows and data columns:
  tmp1 <- length(rownames(setup))
  tmp2 <- length(colnames(dataNorm))
  tmp3 <- length(c(1:tmp1)[rownames(setup) %in% colnames(dataNorm)])
  tmp4 <- length(c(1:tmp2)[colnames(dataNorm) %in% rownames(setup)])
  if (tmp1 != tmp2 || tmp3 != tmp4) {
    stop("inconsistent sample names in dataNorm and setup")  # FIX: typo "inconsistant"
  }

  # Construct the ArrayData return object; include dataRaw/annotation only
  # when they were actually created above:
  if (exists("dataRaw", inherits = FALSE) && exists("annot", inherits = FALSE)) {
    arrayData <- list(dataRaw = dataRaw, dataNorm = dataNorm, setup = setup, annotation = annot)
  } else if (exists("annot", inherits = FALSE)) {
    arrayData <- list(dataNorm = dataNorm, setup = setup, annotation = annot)
  } else if (exists("dataRaw", inherits = FALSE)) {
    arrayData <- list(dataRaw = dataRaw, dataNorm = dataNorm, setup = setup)
  } else {
    arrayData <- list(dataNorm = dataNorm, setup = setup)
  }
  class(arrayData) <- "ArrayData"
  return(arrayData)
}
|
# Auto-generated fuzz/valgrind regression input for meteor:::E_Penman.
# 'testlist' supplies one element per formal argument of E_Penman; most
# vectors are empty, and the populated ones mix extreme magnitudes, Inf
# and NA to probe edge-case handling in the compiled routine.
testlist <- list(Rext = numeric(0), Rs = numeric(0), Z = numeric(0), alpha = numeric(0), atmp = c(5.97935653522269e-39, -2.27656404845097e-178, 7.93021381027407e-209, -1.0848902792759e+133, 4.44398299790022e+96, 4.98717333739482e-156, 4.22817184106748e-307, -4.59220199702648e-303, -8.12884111393297e+47, -9.02801919367885e-56, 1.44851647605435e+165, -5.21082596157426e+273, -2.14704541617865e-222, -3.05487700249276e+296, -1.09260594673836e+246, 0, 0), relh = c(Inf, 3.47174239177889e+24, NA), temp = c(1.06099789548264e-311, 1.60455557047237e+82, -4.51367941637774e-141, -56857994149.4251, 4.60714968529506e+22, 4.94594336083901e-277, 6.98556032546697e-100, -1.15874942296725e-140, 1.66802644272667e-153, 2.197831277967e+109, 2.39828050885494e-124, -4.39547783740517e+225, 2.23714316185293e+183, 2.72877695990519e+48, -2.99453656397724e+232), u = numeric(0))
# Invoke the internal (unexported) function with the fuzzed argument list:
result <- do.call(meteor:::E_Penman,testlist)
str(result) | /meteor/inst/testfiles/E_Penman/AFL_E_Penman/E_Penman_valgrind_files/1615918488-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 973 | r | testlist <- list(Rext = numeric(0), Rs = numeric(0), Z = numeric(0), alpha = numeric(0), atmp = c(5.97935653522269e-39, -2.27656404845097e-178, 7.93021381027407e-209, -1.0848902792759e+133, 4.44398299790022e+96, 4.98717333739482e-156, 4.22817184106748e-307, -4.59220199702648e-303, -8.12884111393297e+47, -9.02801919367885e-56, 1.44851647605435e+165, -5.21082596157426e+273, -2.14704541617865e-222, -3.05487700249276e+296, -1.09260594673836e+246, 0, 0), relh = c(Inf, 3.47174239177889e+24, NA), temp = c(1.06099789548264e-311, 1.60455557047237e+82, -4.51367941637774e-141, -56857994149.4251, 4.60714968529506e+22, 4.94594336083901e-277, 6.98556032546697e-100, -1.15874942296725e-140, 1.66802644272667e-153, 2.197831277967e+109, 2.39828050885494e-124, -4.39547783740517e+225, 2.23714316185293e+183, 2.72877695990519e+48, -2.99453656397724e+232), u = numeric(0))
result <- do.call(meteor:::E_Penman,testlist)
str(result) |
# Normal-distribution exercises.
#
# Author: pawelc
###############################################################################

# Task 2: probabilities for X ~ N(mean = 100, sd = 15).
1 - pnorm(130, mean = 100, sd = 15)                                # P(X > 130)
pnorm(120, mean = 100, sd = 15) - pnorm(100, mean = 100, sd = 15)  # P(100 < X <= 120)
| /module1/test.R | no_license | pawelc/AdvStatistics | R | false | false | 210 | r | # TODO: Add comment
#
# Author: pawelc
###############################################################################
#Task 2
1-pnorm(130,mean=100,sd=15)
pnorm(120,mean=100,sd=15)-pnorm(100,mean=100,sd=15)
|
#' Calculate a CE weighted quantiles
#'
#' @param ce_data A data frame containing at least a finlwt21 column and a cost
#'   column. Both columns must be numeric.
#' @param probs A numeric vector of probabilities between 0 and 1 for which to
#'   compute quantiles. Default is 0.5 (median).
#'
#' @return A two-column data frame in which the first column contains the
#'   probabilities for which quantiles were calculated and their corresponding
#'   quantiles in the second column.
#'
#' @export
#'
#' @importFrom dplyr arrange
#' @importFrom dplyr filter
#' @importFrom dplyr group_by
#' @importFrom dplyr n
#' @importFrom dplyr pull
#' @importFrom dplyr select
#' @importFrom dplyr slice
#' @importFrom dplyr summarise
#'
#' @seealso \code{\link{ce_mean}}
#'
#' @examples
#' \dontrun{
#' # Download the HG file keeping the section for expenditures on utilities
#' utils_hg <- ce_hg(2017, interview) %>%
#'   ce_uccs("Utilities, fuels, and public services", uccs_only = FALSE)
#'
#' # Download and prepare interview data
#' utils_interview <- ce_prepdata(
#'   2017,
#'   interview,
#'   uccs = ce_uccs(utils_hg, "Utilities, fuels, and public services"),
#'   zp = NULL,
#'   integrate_data = FALSE,
#'   hg = utils_hg,
#'   bls_urbn
#' )
#'
#' # Calculate the 25%, 50%, and 75% utilities expenditure quantiles
#' ce_quantiles(utils_interview)
#'
#' # Calculate the 25%, 50%, and 75% utilities expenditure quantiles by
#' # urbanicity
#' utils_interview %>%
#'   tidyr::nest(-bls_urbn) %>%
#'   mutate(quant_utils = purrr::map(data, ce_quantiles, c(0.25, 0.5, 0.75))) %>%
#'   select(-data) %>%
#'   unnest(quant_utils)
#' }
ce_quantiles <- function(ce_data, probs = 0.5) {
  # Validate the required columns before touching them.
  check_cols <- c("finlwt21", "cost")
  if (length(setdiff(check_cols, names(ce_data))) > 0) {
    stop("Your dataset needs to include 'finlwt21' and the 'cost' variable")
  } else if (
    length(setdiff(sapply(ce_data[, check_cols], class), "numeric")) > 0
  ) {
    stop("'finlwt21' and the 'cost' variable must be numeric.")
  }

  # Collapse to one row per consumer unit (newid): total cost and its sampling
  # weight, then sort by cost so cumulative weights trace the weighted ECDF.
  df <- ce_data %>%
    dplyr::select(newid, finlwt21, cost) %>%
    dplyr::group_by(newid) %>%
    dplyr::summarise(
      cost = sum(cost),
      finlwt21 = mean(finlwt21)
    ) %>%
    dplyr::arrange(cost)

  results <- numeric(length(probs))
  for (i in seq_along(probs)) {
    # Units whose cumulative weight lies strictly below / above the weighted
    # probability cut point.
    below <- df %>%
      dplyr::filter(cumsum(finlwt21) < sum(finlwt21 * probs[i]))
    above <- df %>%
      dplyr::filter(cumsum(finlwt21) > sum(finlwt21 * probs[i]))
    if (sum(below$finlwt21) == sum(above$finlwt21)) {
      # Cut point falls exactly between two units: average their costs.
      # dplyr::n() is now namespace-qualified so the function works when dplyr
      # is imported but not attached (the original bare n() was not imported).
      result <- sum(
        below %>% dplyr::slice(dplyr::n()) %>% dplyr::pull(cost),
        above %>% dplyr::slice(1) %>% dplyr::pull(cost)
      ) / 2
    } else {
      result <- above %>% dplyr::slice(1) %>% dplyr::pull(cost)
    }
    results[i] <- result
  }

  # Label each quantile with its percentage, e.g. "25%".
  result_names <- paste0(format(probs * 100, trim = TRUE), "%")
  result_df <- data.frame(probs = result_names, quantile = results)
  return(result_df)
}
| /R/ce-quantiles.R | no_license | awunderground/cepumd | R | false | false | 2,767 | r | #' Calculate a CE weighted quantiles
#'
#' @param ce_data A data frame that includes, at minimum, numeric 'finlwt21'
#'   (sampling weight) and 'cost' columns.
#' @param probs Numeric vector of probabilities in [0, 1] at which weighted
#'   quantiles are computed. Defaults to 0.5 (the weighted median).
#'
#' @return A data frame with two columns: the probability labels (e.g. "25%")
#'   and the corresponding weighted cost quantiles.
#'
#' @export
#'
#' @importFrom dplyr group_by
#' @importFrom dplyr summarise
#'
#' @seealso \code{\link{ce_mean}}
#'
#' @examples
#' \dontrun{
#' # Download the HG file keeping the section for expenditures on utilities
#' utils_hg <- ce_hg(2017, interview) %>%
#'   ce_uccs("Utilities, fuels, and public services", uccs_only = FALSE)
#'
#' # Download and prepare interview data
#' utils_interview <- ce_prepdata(
#'   2017,
#'   interview,
#'   uccs = ce_uccs(utils_hg, "Utilities, fuels, and public services"),
#'   zp = NULL,
#'   integrate_data = FALSE,
#'   hg = utils_hg,
#'   bls_urbn
#' )
#'
#' # Calculate the 25%, 50%, and 75% utilities expenditure quantiles
#' ce_quantiles(utils_interview)
#'
#' # Calculate the 25%, 50%, and 75% utilities expenditure quantiles by
#' # urbanicity
#' utils_interview %>%
#'   tidyr::nest(-bls_urbn) %>%
#'   mutate(quant_utils = purrr::map(data, ce_quantiles, c(0.25, 0.5, 0.75))) %>%
#'   select(-data) %>%
#'   unnest(quant_utils)
#' }
ce_quantiles <- function(ce_data, probs = 0.5) {
  required_cols <- c("finlwt21", "cost")
  if (length(setdiff(required_cols, names(ce_data))) > 0) {
    stop("Your dataset needs to include 'finlwt21' and the 'cost' variable")
  }
  col_classes <- sapply(ce_data[, required_cols], class)
  if (length(setdiff(col_classes, "numeric")) > 0) {
    stop("'finlwt21' and the 'cost' variable must be numeric.")
  }

  # One row per consumer unit: total cost and mean weight, ordered by cost so
  # that cumulative weights define the weighted empirical CDF.
  df <- ce_data %>%
    dplyr::select(newid, finlwt21, cost) %>%
    dplyr::group_by(newid) %>%
    dplyr::summarise(
      cost = sum(cost),
      finlwt21 = mean(finlwt21)
    ) %>%
    dplyr::arrange(cost)

  cum_wt <- cumsum(df$finlwt21)

  # Weighted quantile at a single probability p.
  quantile_at <- function(p) {
    cutoff <- sum(df$finlwt21 * p)
    below <- df[cum_wt < cutoff, , drop = FALSE]
    above <- df[cum_wt > cutoff, , drop = FALSE]
    last_below <- below$cost[length(below$cost)]
    first_above <- if (nrow(above) > 0) above$cost[1] else numeric(0)
    if (sum(below$finlwt21) == sum(above$finlwt21)) {
      # Cut point falls exactly between two units: average their costs.
      sum(last_below, first_above) / 2
    } else {
      first_above
    }
  }

  results <- vapply(probs, quantile_at, numeric(1))

  labels <- paste0(format(probs * 100, trim = TRUE), "%")
  data.frame(probs = labels, quantile = results)
}
|
CompetingRiskFrailtyOptim<-function(data.set=data.set,control=control,form=formula,risk.names=risk.names)
{
######################################################################
######################################################################
#Description of functions, which to be called later##################
######################################################################
######################################################################
####################################################################################
#generalized inverse of a matrix (more stable with LINPACK, as with LAPACK)#########
####################################################################################
ginverse <- function(X, tol = 1e-100)
{
  # Moore-Penrose generalized inverse of X via singular value decomposition
  # (same algorithm as MASS::ginv). The original called svd(X, LINPACK = TRUE);
  # the LINPACK argument has been defunct since R 3.1.0 and errors in current
  # R, so the default LAPACK routine is used instead.
  #
  # X   numeric (or complex) matrix.
  # tol relative tolerance below which singular values are treated as zero.
  Xsvd <- svd(X)
  if (is.complex(X)) Xsvd$u <- Conj(Xsvd$u)
  # Keep only singular values that are non-negligible relative to the largest.
  Positive <- Xsvd$d > max(tol * Xsvd$d[1], 0)
  if (all(Positive)) Xsvd$v %*% (1/Xsvd$d * t(Xsvd$u))
  else if (!any(Positive)) array(0, dim(X)[2:1])
  else Xsvd$v[, Positive, drop = FALSE] %*% ((1/Xsvd$d[Positive]) * t(Xsvd$u[, Positive, drop = FALSE]))
}
#################################################################
#value of the Gamma function, Striling-formula##################
#################################################################
gamma.stirling.ratio <- function(x, y)
{
  # Approximate the ratio Gamma(x) / Gamma(y) using Stirling's asymptotic
  # series. The dominant factors are combined on the log scale before
  # exponentiating, which avoids overflow for large arguments.
  # Fourth-order correction series of the Stirling expansion.
  stirling.series <- function(z) {
    1 + 1/(12*z) + 1/(288*z^2) - 139/(51840*z^3) - 571/(2488320*z^4)
  }
  exp(y - x) * exp((x - 0.5)*log(x) - (y - 0.5)*log(y)) * stirling.series(x) / stirling.series(y)
}
#######################################################################################################################
#specify knots for truncated polynomial slpine basis (s. Ngo/Wand: Smoothing with mixed model software, 2003)##########
#######################################################################################################################
default.knots <- function(x, num.knots)
{
  # Choose knot locations for a truncated-polynomial spline basis following
  # Ngo & Wand, "Smoothing with mixed model software": knots are placed at
  # equally spaced quantiles of the unique covariate values.
  #
  # x         covariate values.
  # num.knots number of knots; if missing, defaults to roughly one knot per 4
  #           unique values, bounded to the range [5, 35].
  if (missing(num.knots)) num.knots <- max(5, min(floor(length(unique(x))/4), 35))
  # Interior quantile probabilities (the 0 and 1 endpoints are dropped).
  # `length.out` is spelled out instead of the original partially matched
  # `length`, which relied on partial argument matching.
  probs <- seq(0, 1, length.out = num.knots + 2)[-c(1, num.knots + 2)]
  return(quantile(unique(x), probs))
}
#################################################
#create artificial poisson data##################
#################################################
survival.to.poisson.frailty <- function(time = time, status.risk = status.risk, x = NULL)
{
  # Expand survival data for one competing risk into the artificial {0,1}
  # Poisson representation used to fit a piecewise-constant hazard model.
  #
  # time        observed survival/censoring times (one per spell).
  # status.risk event indicator for this risk (1 = event observed).
  # x           covariate/model matrix with one row per spell.
  #
  # Returns a list: integration grid, Poisson pseudo-responses (y.list),
  # per-spell pseudo-observation counts (m.list), log offsets (o.list), and
  # the row-inflated design matrix (x).
  event.time <- time[status.risk == 1]
  # Cap the number of integration points at 30 to bound the expanded data
  # size. NOTE(review): when more than 30 distinct event times exist, sample()
  # makes the grid non-deterministic unless the RNG seed is fixed by the
  # caller.
  if (length(unique(event.time)) < 30) grid <- c(unique(sort(event.time)), max(time+1)) else grid <- c(sort(sample(unique(time[status.risk==1]), 30)), max(time+1))
  m <- length(grid)
  grid.minus1 <- c(0, grid[1:(m-1)])
  grid.plus1 <- c(grid[2:m], max(time)+2)
  # Artificial {0,1} Poisson data: a zero for every grid point strictly before
  # the observed time, then the event indicator itself.
  # seq_along() replaces seq(time), which silently expanded to 1:time for a
  # length-one input.
  yt.list <- lapply(seq_along(time), FUN = function(ij) c(rep(0, sum(grid < time[ij])), status.risk[ij]))
  Mt.list <- lapply(seq_along(time), FUN = function(ij) sum(grid < time[ij]) + 1)
  Mt <- unlist(Mt.list)
  # Inflated design matrix: repeat row ij of x Mt[ij] times.
  Xt <- lapply(seq_along(time), FUN = function(ij) kronecker(matrix(1, Mt[ij], 1), t(matrix(as.matrix(x[ij, ])))))
  # Stack the per-spell blocks; do.call() replaces the original
  # eval(parse(text = ...)) construction of the rbind call.
  Xt <- do.call(rbind, Xt)
  # Log exposure (length of each integration interval, clipped at time[ij]).
  Ot.list <- lapply(seq_along(time), FUN = function(ij) log(0.5*(apply(cbind(grid.plus1[1:Mt[ij]], rep(time[ij], Mt[ij])), MARGIN = 1, FUN = min) - apply(cbind(grid.minus1[1:Mt[ij]], rep(time[ij], Mt[ij])), MARGIN = 1, FUN = min))))
  # Note: this offset computation deviates slightly from the stated formula at
  # the right boundary (translated from the original German comment).
  return(list(grid = grid, y.list = yt.list, m.list = Mt.list, o.list = Ot.list, x = Xt))
}
############################################################
#Convex.program(via primal-dual optimization)###############
#optimize mixture weights c.m w.r.t. to constraints#########
############################################################
# Primal-dual interior-point solver for the penalized mixture weights c.m of
# the frailty distribution, subject to c.m >= 0 and sum(c.m) = 1.
#
# penalty.cc  penalty parameter on the mixture weights.
# cc          start value for the weight vector (must be strictly positive).
# lambda      start value for the dual variables of the inequality constraints.
# nu          start value for the dual variable of the equality constraint.
#
# NOTE(review): this function reads Delta.i.m, ID.unique, M and Nabla.penalty
# from the enclosing environment (CompetingRiskFrailtyOptim); it cannot be
# called stand-alone.
#
# Returns a list with the optimized weights cc and the dual variables.
primal.dual<-function(penalty.cc,cc,lambda,nu)
{
###############################################
# Objective function (= negative penalized log-likelihood of the weights).
###############################################
funktion.wert<-function(cc) {-sum(apply(matrix(cc,nrow=nrow(Delta.i.m),ncol=length(cc),byrow=TRUE)*Delta.i.m,1,FUN=function(zeile) log(sum(zeile)))) + 0.5*penalty.cc*t(cc)%*%Nabla.penalty%*%(cc)}
################################
# KKT residual vector (dual, centrality and primal residuals) at a given
# primal-dual point; used by the backtracking line search.
################################
residuen<-function(cc,lambda,nu) # compute the residual vector
{
#Delta.i.m is a matrix with dim=c(N,M)
Delta.i.m.quer<-(1/apply(matrix(cc,nrow=nrow(Delta.i.m),ncol=length(cc),byrow=TRUE)*Delta.i.m,1,FUN=function(x) sum(x)))*Delta.i.m
Nabla.f.0<-apply(Delta.i.m.quer,2,FUN=function(x) -sum(x)) + penalty.cc*Nabla.penalty%*%cc
Nabla.2.f.0<-matrix(0,M,M);for (i in seq(ID.unique)) Nabla.2.f.0<-Nabla.2.f.0 + outer(Delta.i.m.quer[i,],Delta.i.m.quer[i,])
Nabla.2.f.0<-Nabla.2.f.0 + penalty.cc*Nabla.penalty
# Derivatives of the inequality constraints, written in the form ... < 0.
Nabla.f.i<-diag(-1,length(cc)) #D.f=Nabla.f.i
Nabla.2.f.i.summe<-matrix(0,ncol=ncol(Nabla.2.f.0),nrow=nrow(Nabla.2.f.0))
r.dual<-Nabla.f.0+t(Nabla.f.i)%*%lambda+t(A)%*%nu
r.cent<- -diag(lambda)%*%f.i-rep(1/tt,length(lambda))
r.pri<-A%*%cc-b.constr
c(r.dual,r.cent,r.pri)
}
#########################
# Start values.
#########################
mu<-1000
epsilon.feas<-1e-07;epsilon<-1e-06 # stopping criteria
m<-length(cc)
eta.dach<- t(cc)%*%lambda + 1e-08
##############################################
# Parameters for the backtracking line search.
##############################################
alpha.backtracking<-0.01
beta.backtracking<-0.5
###########################
# Constraints.
###########################
# Inequality constraints in the form ... < 0:
f.i<- -cc # inequality constraints: cc >= 0
# Equality constraint sum(cc) = 1:
A<-rbind(rep(1,M))
b.constr<-c(1)
repeat
{
f.i<- -cc
tt<-2*mu/eta.dach
#cat("t= ",tt,"\n\n")
#####################################
# Compute derivatives at the current point.
#####################################
# Represent Delta.i.m as a matrix with dim=c(N,M).
Delta.i.m.quer<-(1/apply(matrix(cc,nrow=nrow(Delta.i.m),ncol=length(cc),byrow=TRUE)*Delta.i.m,1,FUN=function(x) sum(x)))*Delta.i.m
Nabla.f.0<-apply(Delta.i.m.quer,2,FUN=function(x) -sum(x)) + penalty.cc*Nabla.penalty%*%cc
Nabla.2.f.0<-matrix(0,M,M);for (i in seq(ID.unique)) Nabla.2.f.0<- Nabla.2.f.0+outer(Delta.i.m.quer[i,],Delta.i.m.quer[i,])
Nabla.2.f.0<-Nabla.2.f.0+penalty.cc*Nabla.penalty
Nabla.f.i<-diag(-1,length(cc)) #D.f=Nabla.f.i
Nabla.2.f.i.summe<-matrix(0,ncol=ncol(Nabla.2.f.0),nrow=nrow(Nabla.2.f.0))
############################
# KKT residual vectors.
############################
r.dual<-Nabla.f.0+t(Nabla.f.i)%*%lambda+t(A)%*%nu
r.cent<- -diag(lambda)%*%f.i-rep(1/tt,length(lambda))
r.pri<-A%*%cc-b.constr
#cat("r.dual= ",sqrt(sum(r.dual^2)),"\n")
#cat("r.cent= ",sqrt(sum(r.cent^2)),"\n")
#cat("r.pri= ",sqrt(sum(r.pri^2)),"\n")
############################
# Value of the objective function f.0.
############################
f.0<-funktion.wert(cc)
#############################################
# Solve the Newton/KKT system via block reduction.
#############################################
# Computation of delta.cc and delta.nu (see Harville, p. 468).
H<-Nabla.2.f.0+Nabla.2.f.i.summe+diag(lambda/cc)
#H<-Nabla.2.f.0+Nabla.2.f.i.summe-t(Nabla.f.i)%*%qr.solve(diag(f.i))%*%diag(lambda)%*%Nabla.f.i
b<- -(r.dual+t(Nabla.f.i)%*%ginverse(diag(f.i),tol=1e-100)%*%r.cent)
d<- -r.pri
#k<-1000
#W<-ginverse(H+k*t(A)%*%A,tol=1e-100) #hier U=Id, aber zur stabilit� nehme z.B U=k*Id mit k=1000
W<-qr.solve(H+t(A)%*%A,tol=1e-100)
TT<-ginverse(A%*%W%*%t(A),tol=1e-100)%*%(A%*%W%*%b-d)
#delta.nu<-as.vector(TT+k*d)
delta.nu<-as.vector(TT+d)
delta.cc<-as.vector(W%*%b-W%*%t(A)%*%TT)
delta.lambda<-as.vector(-ginverse(diag(f.i),tol=1e-100)%*%diag(lambda)%*%Nabla.f.i%*%delta.cc+ginverse(diag(f.i),tol=1e-100)%*%r.cent)
#cat("delta.lambda= ",delta.lambda,"\n")
#cat("delta.nu= ",delta.nu,"\n")
#cat("delta.cc= ",delta.cc,"\n\n")
# Convergence test on the residual norms and the surrogate duality gap.
# NOTE(review): sqrt(sum(r.dual^2)) is tested twice; the second occurrence was
# presumably meant to test r.cent — confirm before changing.
if (sqrt(sum(r.pri^2)) < epsilon.feas & sqrt(sum(r.dual^2)) < epsilon.feas & sqrt(sum(r.dual^2)) < epsilon.feas & eta.dach < epsilon) break
##################
# Backtracking line search: first shrink s until cc stays feasible, then
# until the residual norm satisfies the sufficient-decrease condition.
##################
s<-1
while (any(cc+s*delta.cc < 0)) s<-s*beta.backtracking
while (sqrt(sum(residuen(cc+s*delta.cc,lambda+s*delta.lambda,nu+s*delta.nu)^2)) > (1-alpha.backtracking*s)*sqrt(sum(residuen(cc,lambda,nu)^2))) s<-s*beta.backtracking
#cat("s= ",s, "\n\n")
#######################
# Updates of the primal and dual variables.
#######################
lambda<-abs(lambda+s*delta.lambda) # technical safeguard, since in theory lambda > 0 holds
nu<-nu+s*delta.nu
cc<-cc+s*delta.cc
#cat("lambda= ",lambda,"\n")
#cat("nu= ",nu,"\n")
#cat("cc= ",cc,"\n")
# Surrogate duality gap for the next barrier update.
eta.dach<- t(cc)%*%lambda + 1e-12
#cat("eta.dach= ",eta.dach,"\n\n")
} # end repeat
list(cc=as.vector(cc),lambda=as.vector(lambda),nu=as.vector(nu))
} #end of function primal.dual()
#######################################################################################
#######################################################################################
#######################################################################################
#######################################################################################
#######################################################################################
#######################################################################################
#######################################################################################
#######################################################################################
names(data.set)[1]<-"ID"
N<-length(data.set[,1]) #number of observations
N.Cluster<-length(unique(data.set$ID)) #number of clusters
ni<-table(data.set$ID) #number of observations in each cluster
ID.unique<-unique(data.set$ID)
names(ID.unique)<-1:length(ID.unique)
#################################################################
#define for each risk type the spline knots for spline bases#####
#################################################################
L<-length(grep("status",names(data.set),fix=TRUE))
Status.names<-names(data.set[,grep("status",names(data.set),fix=TRUE),drop=FALSE])
knots.t<-list()
for (l in 1:L)
{if (length(grep("num.knots",names(control)))==0) knots.t[[l]]<-default.knots(data.set$time[eval(parse(text=paste("data.set$",Status.names[l],sep="")))==1])
else knots.t[[l]]<-default.knots(data.set$time[eval(parse(text=paste("data.set$",Status.names[l],sep="")))==1],control$num.knots[l])
}
K.t<-lapply(knots.t,FUN=function(x) length(x))
############################################################
#indicator matrix with elementes d.ijl as values############
############################################################
D.ijl<-data.set[,grep("status",names(data.set),fix=TRUE),drop=FALSE]
##############################################################################################################
#create a model.matrix for (co)variables######################################################################
#N.B.!!! the reference categories for factors should be defined in the analizied data set bevore applied#####
##############################################################################################################
if (length(attributes(terms(form))$term.labels) == 0)
model.matrix.x<-model.matrix(~numeric(nrow(data.set)))[,1,drop=FALSE] else
model.matrix.x<-model.matrix(formula(paste("~",paste(attributes(terms(form))$term.labels,collapse="+"))),data=data.set)
#########################################################################
#artificial poisson data(for each competing risk)########################
#########################################################################
grid.points.list<-list() #t-points, at which the integral is being approximated
y.poisson.list<-list() #poisson.data
m.list<-list() #number of poisson data
Design.variables<-list() #inflated covariates matrix "model.matrix.x"
offset.list<-list() #offset.parameter
for (l in 1:L)
{
help.surv<-survival.to.poisson.frailty(time=data.set$time,status.risk=eval(parse(text=paste("data.set$",Status.names[l],sep=""))),x=model.matrix.x)
grid.points.list[[l]]<-help.surv$grid
y.poisson.list[[l]]<-help.surv$y.list
m.list[[l]]<-help.surv$m.list
offset.list[[l]]<-help.surv$o.list
Design.variables[[l]]<-help.surv$x
colnames(Design.variables[[l]])<-colnames(model.matrix.x);colnames(Design.variables[[l]])[1]<-"baseline"
}
p<-ncol(Design.variables[[1]])-1 #number of varaibles (including factor levels by factor variables) in the defined design matrix
#define survival time for expanded data set
Z.time.list<-lapply(1:L,FUN=function(l) sapply(data.set$time,FUN=function(x) c(grid.points.list[[l]][x>grid.points.list[[l]]],x)))
Z.time<-lapply(1:L,FUN=function(l) unlist(Z.time.list[[l]]))
#############################################################################################
#############################################################################################
#define the start values for beta parameter by means of glm regression for y.poisson#########
#############################################################################################
#############################################################################################
if (p > 0)
{
#for covariates
variables.time<-lapply(1:L,FUN=function(l) Design.variables[[l]][,2:ncol(Design.variables[[l]]),drop=FALSE]*Z.time[[l]])
for (l in 1:L) colnames(variables.time[[l]])<-paste("variables.time.",colnames(Design.variables[[l]])[2:ncol(Design.variables[[l]])],sep="")
beta.start<-lapply(1:L,FUN=function(l) coef(glm(as.formula(paste("unlist(y.poisson.list[[l]])~",paste(c("Z.time[[l]]",paste(colnames(Design.variables[[l]])[2:ncol(Design.variables[[l]])],sep="",collapse="+"),paste(colnames(variables.time[[l]]),sep="",collapse="+"),paste("offset(unlist(offset.list[[",l,"]]))",sep="")),collapse="+"))),data=data.frame(Design.variables[[l]],variables.time[[l]]),family=poisson)))
for (l in 1:L) names(beta.start[[l]])<-c(paste(c("beta.t.intercept.Baseline.","beta.t.slope.Baseline."),"Risk.",l,sep=""),paste("beta.t.intercept.",colnames(Design.variables[[l]])[2:ncol(Design.variables[[l]])],".Risk.",l,sep=""),paste("beta.t.slope.",colnames(Design.variables[[l]])[2:ncol(Design.variables[[l]])],".Risk.",l,sep=""))
} else
{
beta.start<-lapply(1:L,FUN=function(l) coef(glm(as.formula(paste("unlist(y.poisson.list[[l]])~",paste(c("Z.time[[l]]","offset(unlist(offset.list[[l]]))"),collapse="+"),sep="")),data=data.frame(Design.variables[[l]]),family=poisson)))
for (l in 1:L) names(beta.start[[l]])[1:2]<-c(paste("beta.t.intercept.Baseline.Risk.",l,sep=""),paste("beta.t.slope.Baseline.Risk.",l,sep=""))
}
#coefficients at Baseline
beta.t.intercept.Baseline<-lapply(1:L,FUN=function(l) beta.start[[l]][1]) #for Intercept
beta.t.slope.Baseline<-lapply(1:L,FUN=function(l) beta.start[[l]][2]) #for t-Trend
#coefficients at covariables
beta.t.intercept.list<-lapply(1:L,FUN=function(l) list())
beta.t.slope.list<-lapply(1:L,FUN=function(l) list())
for (l in 1:L)
{
beta.t.intercept.list[[l]][[1]]<-beta.t.intercept.Baseline[[l]]
beta.t.slope.list[[l]][[1]]<-beta.t.slope.Baseline[[l]]
if (p >0)
{
for (i in 3:(2+p)) beta.t.intercept.list[[l]][[i-1]]<-beta.start[[l]][i]
for (i in (2+p+1):(2+2*p)) beta.t.slope.list[[l]][[i-(1+p)]]<-beta.start[[l]][i]
}
}
beta.t.intercept<-list()
beta.t.slope<-list()
for (l in 1:L)
{
beta.t.intercept[[l]]<-unlist(beta.t.intercept.list[[l]])
beta.t.slope[[l]]<-unlist(beta.t.slope.list[[l]])
}
############################################################################
#initialize penalty parameters through variances of random effects##########
############################################################################
variance.penalty.t<-list()
for (l in 1:L)
{
if (p >0)
{
variance.penalty.t[[l]]<-rep(10e-1,p+1)
names(variance.penalty.t[[l]])<-c(paste("variance.t.Baseline.Risk.",l,sep=""),paste(paste("variance.t.",colnames(Design.variables[[l]])[2:ncol(Design.variables[[l]])],sep=""),".Risk.",l,sep=""))
} else
{
variance.penalty.t[[l]]<-10e-1
names(variance.penalty.t[[l]])<-paste("variance.t.Baseline.Risk.",l,sep="")
}
}
###############################################
#create penalty matrices#######################
###############################################
Lambda.t<-lapply(1:L,FUN=function(l) eval(parse(text=paste("diag(c(",paste(paste("0,0,rep(1/variance.penalty.t[[l]][",1:length(variance.penalty.t[[l]]),sep=""),"],K.t[[l]])",sep="",collapse=","),"))",sep=""))))
D.t <-lapply(1:L,FUN=function(l) diag(rep(c(0,0,rep(1,K.t[[l]])),p+1)))
############################################
#initialize random effects##################
############################################
u.t<-list()
for (l in 1:L)
{
u.t.list<-list()
if (p > 0)
{
u.t.list[[1]]<-rep(0,K.t[[l]]); names(u.t.list[[1]])<-paste("u.t.Baseline.",1:K.t[[l]],".Risk.",l,sep="")
for (i in 1:p)
{u.t.list[[i+1]]<-rep(0,K.t[[l]])
names(u.t.list[[i+1]])<-paste("u.t.",rep(colnames(Design.variables[[l]])[i+1],K.t[[l]]),".",1:K.t[[l]],".Risk.",l,sep="")
}
} else
{
u.t.list[[1]]<-rep(0,K.t[[l]])
names(u.t.list[[1]])<-paste("u.t.Baseline.",1:K.t[[l]],".Risk.",l,sep="")
}
u.t[[l]]<-u.t.list
}
####################################
#combine in theta vector############
####################################
teta.t<-list()
teta<-list()
for (l in 1:L)
{
teta.t.Baseline<-c(beta.t.intercept.list[[l]][[1]],beta.t.slope.list[[l]][[1]],u.t[[l]][[1]])
teta.t.list<-list()
teta.t.list[[1]]<-teta.t.Baseline
if (p >0) for (i in 1:p) teta.t.list[[i+1]]<-c(beta.t.intercept.list[[l]][[i+1]],beta.t.slope.list[[l]][[i+1]],u.t[[l]][[i+1]])
teta.t[[l]]<-unlist(teta.t.list)
teta[[l]]<-c(teta.t[[l]])
}
########################################################################
########################################################################
#construct design matrices for the model################################
########################################################################
########################################################################
Design.matrix.t.list<-list()
Basis.t.list<-list()
variables.t<-list()
for (l in 1:L)
{
Basis.t.list[[l]]<-outer(Z.time[[l]],knots.t[[l]],FUN="-")
Basis.t.list[[l]]<-Basis.t.list[[l]]*(Basis.t.list[[l]]>0)
if ( p > 0)
{
variables.t[[l]]<-Design.variables[[l]][,2:ncol(Design.variables[[l]]),drop=FALSE]
Design.matrix.t.list[[l]]<-cbind(1,Z.time[[l]],Basis.t.list[[l]],eval(parse(text=paste("cbind(",paste("variables.t[[l]][,",1:p,"]*cbind(1,Z.time[[l]],Basis.t.list[[l]])",sep="",collapse=","),")"))))
} else
Design.matrix.t.list[[l]]<-cbind(1,Z.time[[l]],Basis.t.list[[l]])
}
########################################################################################################################
########################################################################################################################
########################################################################################################################
########################################################################################################################
################# EM-Algorithm ######################################################################################
########################################################################################################################
########################################################################################################################
########################################################################################################################
########################################################################################################################
########################################################################
#Initialize parameters of mixture distributions for frailties###########
########################################################################
M.list<-list() #number M.1,...,M.L of grid functions on the grid M.1.x...x.M.L
mu.list<-list() #means of Gamma mixture densities =1
sigma.quadrat.l.ml<-list() #variance of Gamma mixture densities set to
alpha.l.ml<-list() #shape parameter of gamma mixture densities
beta.l.ml<-list() #scale parameter of gamma mixture densities
for (l in 1:L)
{
M.list[[l]]<-7
mu.list[[l]]<-rep(1,M.list[[l]])
sigma.quadrat.l.ml[[l]]<-c(0.001,0.05,0.15,0.4,0.6,0.8,0.99)^2
alpha.l.ml[[l]]<-1/sigma.quadrat.l.ml[[l]]
beta.l.ml[[l]]<-alpha.l.ml[[l]]
}
M<-prod(unlist(M.list))
#set the vector of multiindices (expand to array) to order the mixture coefficients c.m
index.matrix<-expand.grid(eval(parse(text=paste("list(",paste("Column.",1:L,"=1:M.list[[",1:L,"]]",sep="",collapse=","),")"))))
#expnad shape and scale parameters accordingly
alpha.l.ml.matrix<-as.matrix(expand.grid(eval(parse(text=paste("list(",paste("Column.",1:L,"=alpha.l.ml[[",1:L,"]]",sep="",collapse=","),")")))))
beta.l.ml.matrix<-as.matrix(expand.grid(eval(parse(text=paste("list(",paste("Column.",1:L,"=beta.l.ml[[",1:L,"]]",sep="",collapse=","),")")))))
##############################################################################################################
#define Penalty Matrix on mixture weights c.m (marginal weights are being penalized)##########################
##############################################################################################################
sigma.quadrat.l.ml.matrix<-as.matrix(expand.grid(eval(parse(text=paste("list(",paste("Column.",1:L,"=sigma.quadrat.l.ml[[",1:L,"]]",sep="",collapse=","),")")))))
sigma.penalty<-apply(sigma.quadrat.l.ml.matrix,1,sum)
Nabla.penalty<-diag(sigma.penalty^2)
################################################################################
################################################################################
#define penalty sequence for penalty parameter on mixture weights c.m###########
################################################################################
################################################################################
penalty.sequence<-c(0.001,0.1,1,10,100,1000,5000,10000,100000)
#penalty.sequence<-c(0.001,0.1)
#####################################################################################################################
#initialize list of frailty values for stopping of the EM-algorithm for each given value of penalty.sequence#########
#####################################################################################################################
w.list<-list()
#########################################################################################
#initialize vector of AIC, df.weigths and log.lik.margin values,#########################
#to define an optimum from them w.r.t. to the penalty sequence##########################
#########################################################################################
aic.vector<-c()
degree.freedom.weights.vector<-c()
log.lik.margin.vector<-c()
aic.and.log.lik.and.df.EM<-list()
##################################################################################################################
#initialize lists for the estimating parameters of the model,#####################################################
#each element of a list corresponds to a penalty value from the penalty sequence##################################
##################################################################################################################
cc.list<-list() #mixture weigths
for (l in 1:L) assign(paste("alpha.t.risk.",l,".list",sep=""),vector(mode="list",length=length(penalty.sequence))) #varying coefficients
deviation.t.list<-list() #deviation
frailty.estimate.list<-list() #frailties
##################################################################################################################
#initialize lists for resulting estimates of fixed and random components of varying coefficients of the model#####
#as well as penalty values for their random parts,################################################################
#each element of a list corresponds to a penalty value from the penalty sequence##################################
##################################################################################################################
estimates.fix<-list()
estimates.random<-list()
penalty.t.list<-list()
##################################################################################################################
#initialize lists for resulting estimates of degrees.of.freedom for varying coefficients##########################
##################################################################################################################
#df.varying<-list()
##############################################################
##############################################################
#loop for penalty parameter from penalty sequence############
##############################################################
##############################################################
for (penalty.cc in penalty.sequence)
{
if (control$print.penalty.mixture) cat("\n\n","penalty mixture =",penalty.cc,"\n\n")
if (control$print.estimates)
{
cat("\n\n","start values: fixed parameters of the varying coefficients","\n\n")
print(unlist(beta.t.intercept.list))
print(unlist(beta.t.slope.list))
cat("","\n\n")
#print(penalty)
}
#################################
#initialize following values#####
#################################
help.aic.and.log.lik.and.df.EM<-NULL #writes out (in matrix form) the values of aic, marginal log.lik and df's from each EM iteration
w.list<-list() #writes out frailties from each EM iteration for comparision for stopping criterion
w.list[[1]]<-lapply(1:L,FUN=function(l) lapply(seq(unique(data.set$ID)),FUN=function(i) 1 ))
log.lik.margin<-c() #marginal log likelihood of the model (given penalty values from the penalty sequence)
variance.em<-list() #writes out the variances(=1/penalty) of random components of varying coefficients (from each EM iteration)
variance.em[[1]]<-unlist(variance.penalty.t)
##########################################################
#start values for the convex optimization routine######### (can be defined out of the penalty.cc loop)
##########################################################
cc<-rep(1,M) #has to be "strong" feasible, i.e. fulfills i.constraints x >0 (but not necessaraly e.constraints Ax=b)
lambda<-rep(1,length(cc)) #fulfills lambda >0 (= number of i.constraints)
nu<-1 #can be of any initial value (= number of e.constraints)
###############################################################
###############################################################
######################EM.loop##################################
###############################################################
###############################################################
for (em in 1:control$niter.EM)
{
if (control$print.EM) cat("\n\n","EM.iteration =",em,"\n\n")
#####################################
############E-step###################
#####################################
##########################################################################
#calculate ingredients of the marginal density of poisson data############
##########################################################################
#lambda.ijl (updated from each M-step of EM-algorithm)
lambda.ijl.list<-list()
lambda.ijl.K.ij.list<-list()
lambda.sum<-matrix(0,nrow=length(seq(ID.unique)),ncol=L)
for (l in 1:L)
{
#build vector lambda.ijlk, k=1...K.ij for all ij; for each given risik l
lambda.ijl<-exp(Design.matrix.t.list[[l]]%*%teta[[l]]+unlist(offset.list[[l]]))
#for each cluster i define aggregated number of poisson data (through all spells j=1...ni)
help.lapply<-lapply(ID.unique,FUN=function(i) {help.number<-cumsum(unlist(m.list[[l]][data.set$ID==i]));help.number[length(help.number)]})
#indiziere geeignet fr jedes cluster
help.cut<-cumsum(unlist(help.lapply))
help.matrix<-cbind(c(0,help.cut[-length(help.cut)])+1,help.cut) #bildet die indizes.matrix fr das jeweilige individuum
indizes<-lapply(seq(ID.unique),FUN=function(i) help.matrix[i,1]:help.matrix[i,2]) #jeweilige index.vektoren
#zum i-ten Cluster zugeh�iger vektor der lambda.ijl
lambda.ijl.list[[l]]<-lapply(indizes,FUN=function(ind) lambda.ijl[ind])
#summierte lambda ber j und K fr jedes t-te Cluster
lambda.sum[,l]<-sapply(lambda.ijl.list[[l]],FUN=function(x) sum(x))
#die k=K.ij bei jeweiligen spells des i-ten individuums
lambda.ijl.K.ij<-lambda.ijl[cumsum(unlist(m.list[[l]]))]
#die zum i-ten Cluster geh�en, zusamen.gefasst
lambda.ijl.K.ij.list[[l]]<-lapply(ID.unique,FUN=function(i) lambda.ijl.K.ij[data.set$ID==i])
}
lambda.prod<-unlist(lapply(seq(ID.unique),FUN=function(i) {product.l<-c(); for (l in 1:L) product.l[l]<-prod(lambda.ijl.K.ij.list[[l]][[i]]^D.ijl[data.set$ID==as.numeric(ID.unique[names(ID.unique)==i]),l]);prod(product.l)}))
#updated shape and scale parameters of gamma distributions
alpha.l.ml.given.delta.i<-lapply(ID.unique,FUN=function(i) {help.matrix<-c();for (l in 1:L) {help.matrix<-cbind(help.matrix,sum(D.ijl[data.set$ID==i,l])+alpha.l.ml.matrix[,l])};help.matrix})
beta.l.ml.given.delta.i<-lapply(seq(ID.unique),FUN=function(i) {help.matrix<-c();for (l in 1:L) {help.matrix<-cbind(help.matrix,lambda.sum[i,l]+beta.l.ml.matrix[,l])};help.matrix})
#expand to the 3.dim array
alpha.l.ml.expand.to.array<-array(alpha.l.ml.matrix,c(M,L,length(seq(ID.unique))))
alpha.l.ml.given.delta.i.expand.to.array<-array(unlist(lapply(seq(ID.unique),FUN=function(i) alpha.l.ml.given.delta.i[[i]])),c(M,L,length(seq(ID.unique))))
beta.l.ml.expand.to.array<-array(beta.l.ml.matrix,c(M,L,length(seq(ID.unique))))
beta.l.ml.given.delta.i.expand.to.array<-array(unlist(lapply(seq(ID.unique),FUN=function(i) beta.l.ml.given.delta.i[[i]])),c(M,L,length(seq(ID.unique))))
#calculate ratio of gamma functions durch stirling.approximation
#(some neglegible differences between stirling approximation and direct calculation)
#through stirling approximation
#gamma.ratio<-ifelse(alpha.l.ml.given.delta.i.expand.to.array > 10 & alpha.l.ml.expand.to.array > 10,gamma.stirling.ratio(alpha.l.ml.given.delta.i.expand.to.array,alpha.l.ml.expand.to.array),gamma(alpha.l.ml.given.delta.i.expand.to.array)/gamma(alpha.l.ml.expand.to.array))
#direct through Gamma(z+1)=z*Gamma(z)
d.array<-alpha.l.ml.given.delta.i.expand.to.array-alpha.l.ml.expand.to.array
gamma.ratio<-array(apply(cbind(seq(alpha.l.ml.expand.to.array)),1,FUN=function(i) prod(alpha.l.ml.expand.to.array[i]:(alpha.l.ml.expand.to.array[i]+d.array[i]-1))),dim=dim(alpha.l.ml.expand.to.array))
gamma.ratio<-ifelse(d.array==0,1,gamma.ratio)
oben<-alpha.l.ml.expand.to.array*log(beta.l.ml.expand.to.array)
unten<-alpha.l.ml.given.delta.i.expand.to.array*log(beta.l.ml.given.delta.i.expand.to.array)
exp.ratio<-exp(oben-unten)
exp.gamma<-exp.ratio*gamma.ratio
exp.gamma.prod<-t(apply(exp.gamma,c(1,3),"prod"))
##########################################################
#combine to the matrix Delta (i-->rows,m-->columns)#######
##########################################################
Delta.i.m<-exp.gamma.prod*matrix(lambda.prod,ncol=ncol(exp.gamma.prod),nrow=nrow(exp.gamma.prod),byrow=FALSE)
############################
#optimal weights c.m########
############################
#(in the next EM iteration the first value will be set nearby that one already calculated from the preceeding iteration, to reduce computation)
cc<-primal.dual(penalty.cc,as.vector(cc+1e-05),lambda,nu)$cc
######################################################
#marginal weights c.(l)ml#############################
######################################################
Delta.i.m.quer<-(1/apply(matrix(cc,nrow=nrow(Delta.i.m),ncol=length(cc),byrow=TRUE)*Delta.i.m,1,FUN=function(x) sum(x)))*Delta.i.m
cc.tilde.i<-matrix(cc,nrow=nrow(Delta.i.m),ncol=length(cc),byrow=TRUE)*Delta.i.m.quer
c.l.ml.i<-lapply(seq(ID.unique),FUN=function(i) lapply(1:L,FUN=function(l) lapply(1:M.list[[l]],FUN=function(m.l) sum(cc.tilde.i[i,index.matrix[,l]==m.l]))))
##########################################################
#define the degrees of freedom from proposed relation#####
##########################################################
Nabla.2.f.0<-matrix(0,M,M);for (i in seq(ID.unique)) Nabla.2.f.0<- Nabla.2.f.0+outer(Delta.i.m.quer[i,],Delta.i.m.quer[i,])
I<- Nabla.2.f.0 #not penalized fisher information (here implicit multiplied with "-1", because Nabla.2.f.0 calculated for -log.lik)
H<- Nabla.2.f.0+penalty.cc*Nabla.penalty #penalized fisher information
degree.freedom.weights<-sum(diag(qr.solve(H,tol=1e-100)%*%I))
#marginal log.lik will be calculated later; hence aic=-log.lik.margin+degree.freedom.weights
############################
#frailties estimates########
############################
w<-lapply(1:L,FUN=function(l) lapply(seq(unique(data.set$ID)),FUN=function(i) sum(unlist(c.l.ml.i[[i]][[l]])*unique(alpha.l.ml.given.delta.i[[i]][,l])/unique(beta.l.ml.given.delta.i[[i]][,l]))))
w.list[[em+1]]<-w
difference.frailty<-c(); for (l in 1:L) difference.frailty[l]<-sum((unlist(w.list[[em+1]][[l]])-unlist(w.list[[em]][[l]]))^2)
#cat("sum(w.alt-w.neu)^2= ",sum(abs.differenz.frailty),"\n\n")
#########################################################
#####################End of E-step of EM#################
#########################################################
#update the offset parameter of the model
#(depending on the value of frailty parameter w)
#this builds the link between frailty and theta
help.lapply<-list() #for each cluster i define aggregated number of poisson data(through all spells j=1...ni)
for (l in 1:L) help.lapply[[l]]<-unlist(lapply(ID.unique,FUN=function(i) {help.number<-cumsum(unlist(m.list[[l]][data.set$ID==i]));help.number[length(help.number)]}))
#for each risk l a vector of frailty values
offset.frailty<-lapply(1:L,FUN=function(l) unlist(offset.list[[l]]) + rep(log(unlist(w[[l]])),help.lapply[[l]]))
###########################################
############# M-step of EM ################
###########################################
##################################################################################################################
#approximate penalized fisher.type matrix (for frailty=1), to calculate approx. marginal log likilehood###########
##################################################################################################################
I.t.frailty.1<-list()
index.vector<-list()
for (l in 1:L)
{index.vector[[l]]<-(2+1):(2+K.t[[l]])
if (p > 0) for (k in 1:p) index.vector[[l]]<-c(index.vector[[l]],(2+K.t[[l]]+2*k+1+(k-1)*K.t[[l]]):(2+K.t[[l]]+2*k+k*K.t[[l]]))
}
variance.epoch<-list() #writes out the variances(=1/penalty) of random components of varying coefficients (from each epoch iteration)
#############################################################
#fr jeden Ausfall.typ l=1...L###############################
#############################################################
for (l in 1:L)
{
variance.epoch.l<-list() #initialize list of variances(=1/lambda); each element of this list is a value from current epoch
variance.epoch.l[[1]]<-unlist(variance.penalty.t[[l]])
for (epoch in 1:control$niter.epoch)
{
#penalized score for theta.t
S.t<-crossprod(Design.matrix.t.list[[l]],as.vector(unlist(y.poisson.list[[l]])-exp(Design.matrix.t.list[[l]]%*%teta.t[[l]]+unlist(offset.frailty[[l]]))))
S.t.pen<-S.t-Lambda.t[[l]]%*%D.t[[l]]%*%teta.t[[l]]
#penalized observed fisher.type matrix(multipliced with "-1")
I.t<-crossprod(Design.matrix.t.list[[l]],Design.matrix.t.list[[l]]*as.vector(exp(Design.matrix.t.list[[l]]%*%teta.t[[l]]+unlist(offset.frailty[[l]]))))
I.t.pen<-I.t+Lambda.t[[l]]%*%D.t[[l]]
##########################################################################################
#approximation of penalised fisher.type matrix(with frailties=1)##########################
##########################################################################################
I.t.frailty.1[[l]]<-crossprod(Design.matrix.t.list[[l]],Design.matrix.t.list[[l]]*as.vector(exp(Design.matrix.t.list[[l]]%*%teta.t[[l]]+unlist(offset.list[[l]]))))
I.t.frailty.1[[l]]<-I.t.frailty.1[[l]][index.vector[[l]],index.vector[[l]]] + Lambda.t[[l]][index.vector[[l]],index.vector[[l]]]
#update theta.t
inverse.t<-qr.solve(I.t.pen,tol=1e-100)
teta.t.old<-teta.t[[l]]
teta.t.new<-teta.t.old+inverse.t%*%S.t.pen
names(teta.t.new)<-names(teta.t[[l]])
teta.t[[l]]<-teta.t.new
names(teta.t[[l]])<-names(teta.t.new)
teta[[l]]<-c(teta.t[[l]])
#update fixed components beta
beta.t.intercept[[l]]<-teta.t[[l]][grep("beta.t.intercept",names(teta.t[[l]]),fixed=TRUE)]
beta.t.slope[[l]]<-teta.t[[l]][grep("beta.t.slope",names(teta.t[[l]]),fixed=TRUE)]
#update random components u
u.t[[l]]<-teta.t[[l]][grep("u.t",names(teta.t[[l]]),fixed=TRUE)]
#########################################################################
#fixpoint iteration for update of lambda penalties#######################
#just one update w.r.t lambda=F(lambda)##################################
#########################################################################
#old values of variances(=1/lambda)
variance.penalty.t.old<-variance.penalty.t[[l]]
variance.penalty.old<-c(variance.penalty.t.old)
#define first the submatrices of the (penalized) fisher.information, correspondig to the random coefficients
index.vector.t<-(2+1):(2+K.t[[l]]) #for u.t.Baseline
if (p > 0) for (k in 1:p) index.vector.t<-c(index.vector.t,(2+K.t[[l]]+2*k+1+(k-1)*K.t[[l]]):(2+K.t[[l]]+2*k+k*K.t[[l]]))
I.t.pen.u.t<-I.t.pen[index.vector.t,index.vector.t] #penalizied fisher.type matrix for t
inverse.I.t.pen.u.t<-qr.solve(I.t.pen.u.t,tol=1e-100)
#update of penalties
variance.penalty.t.new<-c()
#baseline
variance.penalty.t.new[1]<-(sum(diag(inverse.I.t.pen.u.t[1:K.t[[l]],1:K.t[[l]]]))+sum(u.t[[l]][grep("Baseline",names(u.t[[l]]),fixed=TRUE)]^2))/K.t[[l]]
#covariates
if (p > 0) for (k in 1:p)
variance.penalty.t.new[k+1]<-(sum(diag(inverse.I.t.pen.u.t[(k*K.t[[l]]+1):((k+1)*K.t[[l]]),(k*K.t[[l]]+1):((k+1)*K.t[[l]])]))+sum(u.t[[l]][(k*K.t[[l]]+1):((k+1)*K.t[[l]])]^2))/K.t[[l]]
Lambda.t[[l]]<-eval(parse(text=paste("diag(c(",paste(paste("0,0,rep(1/variance.penalty.t.new[",1:length(variance.penalty.t.new),sep=""),"],K.t[[l]])",sep="",collapse=","),"))",sep="")))
variance.penalty.t[[l]]<-variance.penalty.t.new
variance.epoch.l[[epoch+1]]<-variance.penalty.t[[l]]
#stoping rule
if (sum((teta.t.new-teta.t.old)^2) < control$tol.epoch & sum((variance.epoch.l[[epoch+1]]-variance.epoch.l[[epoch]])^2) < control$tol.variance) break
} #end for (epoch in 1:niter.epoch)
variance.epoch[[l]]<-variance.epoch.l[[epoch+1]]
#degrees.of.freedom for varying coefficients
#assign(paste("df.",l,".Baseline",sep=""),sum(diag((qr.solve(I.t.pen,tol=1e-100)%*%I.t)[1:(2+K.t[[l]]),1:(2+K.t[[l]])])))
#if (p > 0) for (k in 1:p)
#{assign(paste("df.",l,".",colnames(Design.variables[[l]])[k+1],sep=""),sum(diag((qr.solve(I.t.pen,tol=1e-100)%*%I.t)[(2+K.t[[l]]+2*(k-1)+(k-1)*K.t[[l]]+1):(2+K.t[[l]]+2*k+k*K.t[[l]]),(2+K.t[[l]]+2*(k-1)+(k-1)*K.t[[l]]+1):(2+K.t[[l]]+2*k+k*K.t[[l]])])))
#}
} #end for (l in 1:L)
#################################################################################################
#marginal log likelihood (all random components integrated out from the model)###################
#(s. supplemented paper)#########################################################################
#################################################################################################
I.part<-sum(apply(matrix(cc,nrow=nrow(Delta.i.m),ncol=length(cc),byrow=T)*Delta.i.m,1,FUN=function(zeile) log(sum(zeile))))
II.part<-0.5*sum(sapply(1:L,FUN=function(l) as.vector(-t(u.t[[l]])%*%Lambda.t[[l]][index.vector[[l]],index.vector[[l]]]%*%u.t[[l]]) + sum(log(eigen(Lambda.t[[l]][index.vector[[l]],index.vector[[l]]],only.values=TRUE)$values)) - sum(log(eigen(I.t.frailty.1[[l]],only.values=TRUE)$values))))
log.lik.margin<-c(log.lik.margin,I.part+II.part)
if (control$print.log.lik) cat("\n\n","log.lik.margin =",I.part+II.part,"\n\n")
#AIC of the model
aic<- -(I.part+II.part)+degree.freedom.weights
#save the values of AIC, df and marginal log.lik from each EM iteration
help.aic.and.log.lik.and.df.EM<-rbind(help.aic.and.log.lik.and.df.EM,c(aic,degree.freedom.weights,I.part+II.part))
aic.and.log.lik.and.df.EM[[seq(penalty.sequence)[penalty.sequence==penalty.cc]]]<-help.aic.and.log.lik.and.df.EM
#stopping rule for EM iteration
variance.em[[em+1]]<-unlist(variance.epoch)
if (sum((teta.t.new-teta.t.old)^2) < control$tol.epoch & sum((variance.em[[em+1]]-variance.em[[em]])^2) < control$tol.variance & sum(difference.frailty) < control$tol.frailty) break
} #end for (em in 1:control$niter.EM)
###########################################################################################################
###########################################################################################################
###########################################################################################################
###########################################################################################################
#######################################End of EM Algorithm#################################################
###########################################################################################################
###########################################################################################################
###########################################################################################################
###########################################################################################################
#save the last value of AIC, df and log.lik.margin from the EM algorithm as an optimal value, given the mixture penalty from penalty sequence
aic.vector<-c(aic.vector,aic.and.log.lik.and.df.EM[[seq(penalty.sequence)[penalty.sequence==penalty.cc]]][em,1])
degree.freedom.weights.vector<-c(degree.freedom.weights.vector,aic.and.log.lik.and.df.EM[[seq(penalty.sequence)[penalty.sequence==penalty.cc]]][em,2])
log.lik.margin.vector<-c(log.lik.margin.vector,aic.and.log.lik.and.df.EM[[seq(penalty.sequence)[penalty.sequence==penalty.cc]]][em,3])
#penalty(=1/lambda)
penalty.t<-list()
for (l in 1:L)
{
penalty.t[[l]]<-1/variance.penalty.t[[l]]
names(penalty.t[[l]])[1]<-paste("penalty.t.Baseline.Risk.",l,sep="")
if (p > 0) names(penalty.t[[l]])[-1]<-paste(paste("penalty.t.",colnames(Design.variables[[l]])[2:ncol(Design.variables[[l]])],sep=""),".Risk.",l,sep="")
}
#print of resulting estimates: theta and penalty
if (control$print.estimates)
{
cat("\n\n","resulting estimates: beta and penalties","\n\n")
print(unlist(beta.t.intercept))
print(unlist(beta.t.slope))
cat("","\n\n")
print(unlist(penalty.t))
}
###############################################################################
###############################################################################
#calculate variances of the resulting estimates for theta (cp. Louis,1982)#####
###############################################################################
###############################################################################
co.variance.teta<-list()
variance.teta<-list()
offset.frailty<-lapply(1:L,FUN=function(l) unlist(offset.list[[l]]) + rep(log(unlist(w[[l]])),unlist(help.lapply[[l]])))
for (l in 1:L)
{
#complete information, already calculated
fisher.complete<-crossprod(Design.matrix.t.list[[l]],Design.matrix.t.list[[l]]*as.vector(exp(Design.matrix.t.list[[l]]%*%teta.t[[l]]+unlist(offset.frailty[[l]]))))
#(conditional) variance of frailty w, given poisson data delta.l
variance.w<-unlist(lapply(seq(ID.unique),FUN=function(i) sum(unlist(c.l.ml.i[[i]][[l]])*unique(alpha.l.ml.given.delta.i[[i]][,l])/unique(beta.l.ml.given.delta.i[[i]][,l]^2))))
variance.w<-rep(variance.w,unlist(help.lapply[[l]])) #inflated, according to dimension of the Design.matrix
#missing information
vector.diag<-sqrt(variance.w)*as.vector(exp(Design.matrix.t.list[[l]]%*%teta.t[[l]]+unlist(offset.list[[l]])))
fisher.missing<-crossprod(vector.diag*Design.matrix.t.list[[l]])
fisher.observed<-fisher.complete-fisher.missing
fisher.observed.pen<-fisher.observed+diag(c(diag(Lambda.t[[l]]))) #penalized observed fisher type matrix
#sandwich estimator of covariance matrix of theta
co.variance.teta[[l]]<-qr.solve(fisher.observed.pen,tol=1e-100)%*%fisher.observed%*%qr.solve(fisher.observed.pen,tol=1e-100)
variance.teta[[l]]<-diag(co.variance.teta[[l]])
names(variance.teta[[l]])<-c(names(teta[[l]]))
}
###################################
###Grid for time###################
###################################
length.grid<-1000
grid.t.list<-list()
B.grid.t.list<-list()
for (l in 1:L)
{
grid.t.list[[l]]<-seq(min(Z.time[[l]]),max(Z.time[[l]]),le=length.grid)
B.grid.t.list[[l]]<-outer(grid.t.list[[l]],knots.t[[l]],FUN="-")
B.grid.t.list[[l]]<-B.grid.t.list[[l]]*(B.grid.t.list[[l]]>0)
}
#############################
#Confidence Bands############
#############################
deviation.t<-list() #list of L elements, each element is a list of p+1 deviations for: baseline, covariate.1,...,covariate.p
C.t.grid<-list()
variance.t.Baseline<-list()
deviation.t.Baseline<-list()
for (l in 1:L)
{
deviation.t.matrix<-matrix(0,nrow=length(grid.t.list[[l]]),ncol=p+1)
colnames(deviation.t.matrix)<-paste("deviation.",colnames(Design.variables[[l]]),sep="")
C.t.grid[[l]]<-cbind(1,grid.t.list[[l]],B.grid.t.list[[l]])
#Baseline
co.variance.teta.Baseline<-co.variance.teta[[l]][1:(2+K.t[[l]]),1:(2+K.t[[l]])]
variance.t.Baseline[[l]]<-apply(C.t.grid[[l]],1,FUN=function(help.row) t(help.row)%*%co.variance.teta.Baseline%*%help.row)
deviation.t.Baseline[[l]]<-qnorm(0.975)*sqrt(variance.t.Baseline[[l]]) #conf.level alpha=0.05
deviation.t.matrix[,1]<-deviation.t.Baseline[[l]]
#covariates
if (p > 0) for (k in 1:p)
{
assign(paste("co.variance.teta.",colnames(Design.variables[[l]])[k+1],sep=""),co.variance.teta[[l]][(2+K.t[[l]]+2*k-1+(k-1)*K.t[[l]]):(2+K.t[[l]]+2*k+k*K.t[[l]]),(2+K.t[[l]]+2*k-1+(k-1)*K.t[[l]]):(2+K.t[[l]]+2*k+k*K.t[[l]])])
assign(paste("variance.t.",colnames(Design.variables[[l]])[k+1],sep=""),apply(C.t.grid[[l]],1,FUN=function(help.row) t(help.row)%*%get(paste("co.variance.teta.",colnames(Design.variables[[l]])[k+1],sep=""))%*%help.row))
assign(paste("deviation.t.",colnames(Design.variables[[l]])[k+1],sep=""),qnorm(0.975)*sqrt(get(paste("variance.t.",colnames(Design.variables[[l]])[k+1],sep=""))))
deviation.t.matrix[,k+1]<-get(paste("deviation.t.",colnames(Design.variables[[l]])[k+1],sep=""))
}
deviation.t[[l]]<-deviation.t.matrix
} #end for(l in 1:L)
######################################################################
#varying coefficients for Baseline and covariates#####################
######################################################################
for (l in 1:L) assign(paste("estimate.beta.t.risk.",l,sep=""),eval(parse(text=paste("c(beta.t.intercept[[",l,"]],beta.t.slope[[",l,"]])",sep=""))))
#varying coefficients with values on the grid
for (l in 1:L) assign(paste("alpha.t.risk.",l,sep=""),vector(mode="list",length=p+1))
#Baseline
for (l in 1:L)
{
help.vector<-as.vector(cbind(1,grid.t.list[[l]])%*%get(paste("estimate.beta.t.risk.",l,sep=""))[grep("Baseline",names(get(paste("estimate.beta.t.risk.",l,sep=""))),fixed=TRUE)]+B.grid.t.list[[l]]%*%u.t[[l]][grep("Baseline",names(u.t[[l]]),fixed=TRUE)])
eval(parse(text=paste("alpha.t.risk.",l,"[[1]]","<-","help.vector",sep="")))
}
#covariates
if (p > 0)
for (l in 1:L)
{
for (k in 1:p)
{
help.vector<-as.vector(cbind(1,grid.t.list[[l]])%*%c(beta.t.intercept[[l]][grep(paste("beta.t.intercept.",colnames(Design.variables[[l]])[k+1],sep=""),names(beta.t.intercept[[l]]),fixed=TRUE)],beta.t.slope[[l]][grep(paste("beta.t.slope.",colnames(Design.variables[[l]])[k+1],sep=""),names(beta.t.slope[[l]]),fixed=TRUE)])+B.grid.t.list[[l]]%*%u.t[[l]][grep(paste("u.t.",colnames(Design.variables[[l]])[k+1],sep=""),names(u.t[[l]]),fixed=TRUE)])
eval(parse(text=paste("alpha.t.risk.",l,"[[",k+1,"]]","<-","help.vector",sep="")))
}
}
########################################
#resulting estimates of frailties#######
########################################
frailty.estimate<-w.list[[em]]
##########################################################################
#resulting estimates of fixed and random components of varying.coef#######
#as well as penalty values for their random parts#########################
##########################################################################
estimates.fix[[seq(penalty.sequence)[penalty.sequence==penalty.cc]]]<-c(unlist(beta.t.intercept),unlist(beta.t.slope))
estimates.random[[seq(penalty.sequence)[penalty.sequence==penalty.cc]]]<-u.t
penalty.t.list[[seq(penalty.sequence)[penalty.sequence==penalty.cc]]]<-unlist(penalty.t)
##############################################################################################
#fill out the list components according to the penalty value of mixture weights###############
##############################################################################################
cc.list[[seq(penalty.sequence)[penalty.sequence==penalty.cc]]]<-cc
for (l in 1:L) eval(parse(text=paste("alpha.t.risk.",l,".list","[[",seq(penalty.sequence)[penalty.sequence==penalty.cc],"]]","<-","alpha.t.risk.",l,sep="")))
deviation.t.list[[seq(penalty.sequence)[penalty.sequence==penalty.cc]]]<-deviation.t
frailty.estimate.list[[seq(penalty.sequence)[penalty.sequence==penalty.cc]]]<-frailty.estimate
}
#####################################################
#####################################################
#end for loop (penalty.cc in penalty.sequence)#######
#####################################################
#####################################################
####################################################################
#define the optimal value of penalty of mixture weights#############
####################################################################
penalty.cc.optim<-penalty.sequence[aic.vector==min(aic.vector)]
position.optim<-seq(penalty.sequence)[penalty.sequence==penalty.cc.optim]
if (control$print.estimates) cat("\n\n\n","optimal penalty of mixture weights =",penalty.cc.optim,"\n\n\n")
################################################################
#define according optimial values of model components###########
################################################################
#optimal mixture weights
cc.optim<-cc.list[[position.optim]]
#deviation as a list with data frames (w.r.t. to each risk) as elements of it
deviation.optim<-deviation.t.list[[position.optim]]
names(deviation.optim)<-paste("deviation.Risk.",1:L,sep="")
#optimal frailties
frailty.estimate.optim<-frailty.estimate.list[[position.optim]]
frailty.list<-list()
for (l in 1:L) frailty.list[[l]]<-unlist(frailty.estimate.optim[[l]])
names(frailty.list)<-paste("frailty.Risk.",1:L,sep="")
#optimal AIC,df.weights andlog.lik.margin values
aic.optim<-aic.vector[aic.vector==min(aic.vector)]
degree.freedom.weights.optim<-degree.freedom.weights.vector[aic.vector==min(aic.vector)]
log.lik.margin.optim<-log.lik.margin.vector[aic.vector==min(aic.vector)]
#varying coefficients as a list with data frames (w.r.t. to each risk) as elements of it
varying.list<-list()
for (l in 1:L)
{
assign(paste("alpha.t.risk.",l,".optim",sep=""),data.frame(get(paste("alpha.t.risk.",l,".list",sep=""))[[position.optim]]))
if (p > 0) var.coef.names<-c(paste("Baseline.Risk.",l,sep=""),paste(colnames(Design.variables[[l]])[2:ncol(Design.variables[[l]])],".Risk.",l,sep=""))
else var.coef.names<-c(paste("Baseline.Risk.",l,sep=""))
eval(parse(text=paste("names(alpha.t.risk.",l,".optim",")","<-var.coef.names",sep="")))
varying.list[[l]]<-get(paste("alpha.t.risk.",l,".optim",sep=""))
}
#grid for plotting
grid.frame<-data.frame(grid.t.list)
names(grid.frame)<-paste("grid.Risk.",1:L,sep="")
#optimal values of fixed and random components of varying coefficients as well as penalties for their random parts
fixed.coef.optim<-estimates.fix[[position.optim]]
random.coef.optim<-estimates.random[[position.optim]]
penalty.varying.optim<-penalty.t.list[[position.optim]]
#optimal values of degrees.of.freedom for varying coefficients
#df.varying.optim<-df.varying[[position.optim]]
if (p > 0) factor.names<-colnames(Design.variables[[1]])[2:length(colnames(Design.variables[[1]]))] else factor.names<-character(0)
#################################################################
#give the supplemented names to the Risks########################
#################################################################
#fixed.coef (beta)
names(fixed.coef.optim)<-paste(sapply(1:length(fixed.coef.optim),FUN=function(i) strsplit(names(fixed.coef.optim),".Risk.")[[i]][1]),rep(risk.names,rep(p+1,L),sep="."))
#random.coef (u)
for (l in 1:L) names(random.coef.optim[[l]])<-paste(sapply(1:length(random.coef.optim[[l]]),FUN=function(i) strsplit(names(random.coef.optim[[l]]),".Risk.")[[i]][1]),risk.names[l],sep=".")
#penalty.varying.optim (lambda)
for (l in 1:L) names(penalty.varying.optim)[(1+(l-1)*(p+1)):(l*(p+1))]<-paste(sapply(1:(p+1),FUN=function(i) strsplit(names(penalty.varying.optim)[(1+(l-1)*(p+1)):(l*(p+1))],".Risk.")[[i]][1]),risk.names[l],sep=".")
#grid.frame
names(grid.frame)<-paste(sapply(1:length(grid.frame),FUN=function(i) strsplit(names(grid.frame),".Risk.")[[i]][1]),risk.names,sep=".")
#varying.list
for (l in 1:L) colnames(varying.list[[l]])<-paste(sapply(1:length(varying.list[[l]][1,]),FUN=function(i) strsplit(colnames(varying.list[[l]]),".Risk.")[[i]][1]),risk.names[l],sep=".")
#deviation.list
names(deviation.optim)<-paste(sapply(1:length(deviation.optim),FUN=function(i) strsplit(names(deviation.optim),".Risk.")[[i]][1]),risk.names,sep=".")
for (l in 1:L) colnames(deviation.optim[[l]])<-paste(colnames(deviation.optim[[l]]),risk.names[l],sep=".")
#frailty.list
names(frailty.list)<-paste(sapply(1:length(frailty.list),FUN=function(i) strsplit(names(frailty.list),".Risk.")[[i]][1]),risk.names,sep=".")
list(L=L,M.list=M.list,fixed.coef.optim=fixed.coef.optim, random.coef.optim=random.coef.optim, penalty.varying.optim=penalty.varying.optim, penalty.weights.optim=penalty.cc.optim, grid.frame=grid.frame, varying.list=varying.list, deviation.list=deviation.optim, frailty.list=frailty.list, mixture.weights=cc.optim, aic.optim=aic.optim, df.weights.optim=degree.freedom.weights.optim, log.lik.margin.optim=log.lik.margin.optim, p=p, factor.names=factor.names, risk.names=risk.names)
#aic.vector=aic.vector,df.weights.vector=degree.freedom.weights.vector,log.lik.margin.vector=log.lik.margin.vector
} #end of function CompetingRiskFrailtyOptim(...)
| /R/CompetingRiskFrailtyOptim.function.R | no_license | zhaoyiqi97/CompetingRiskFrailty | R | false | false | 55,403 | r | CompetingRiskFrailtyOptim<-function(data.set=data.set,control=control,form=formula,risk.names=risk.names)
{
######################################################################
######################################################################
#Description of functions, which to be called later##################
######################################################################
######################################################################
####################################################################################
#generalized inverse of a matrix (more stable with LINPACK, as with LAPACK)#########
####################################################################################
#Moore-Penrose generalized inverse of a matrix via singular value decomposition
#(cp. MASS::ginv); singular values below tol*d[1] are treated as numerically zero
#X   - real or complex matrix
#tol - relative tolerance for discarding small singular values
#fix: the LINPACK argument of svd() has been defunct since R 3.1.0 and was later
#removed entirely, so svd(X,LINPACK=TRUE) errors in current R; svd(X) uses LAPACK
ginverse<-function(X,tol=1e-100)
{
Xsvd<-svd(X)
#for complex input the left singular vectors must be conjugated (as in MASS::ginv)
if (is.complex(X)) Xsvd$u<-Conj(Xsvd$u)
#logical mask of the numerically positive singular values
Positive<-Xsvd$d > max(tol * Xsvd$d[1], 0)
if (all(Positive)) Xsvd$v %*% (1/Xsvd$d * t(Xsvd$u))
else if (!any(Positive)) array(0, dim(X)[2:1])
else Xsvd$v[, Positive, drop = FALSE] %*% ((1/Xsvd$d[Positive]) * t(Xsvd$u[, Positive, drop = FALSE]))
}
#################################################################
#value of the Gamma function, Striling-formula##################
#################################################################
#approximate the ratio Gamma(x)/Gamma(y) by Stirling's series
#(asymptotic correction terms up to order z^-4); accurate for moderately large x, y
gamma.stirling.ratio<-function(x,y)
{
#Stirling correction series: 1 + 1/(12z) + 1/(288z^2) - 139/(51840z^3) - 571/(2488320z^4)
stirling.series<-function(z) 1+1/(12*z)+1/(288*z^2)-139/(51840*z^3)-571/(2488320*z^4)
#leading factor exp(y-x) * x^(x-1/2) / y^(y-1/2)
leading.factor<-exp(y-x)*exp((x-0.5)*log(x)-(y-0.5)*log(y))
leading.factor*stirling.series(x)/stirling.series(y)
}
#######################################################################################################################
#specify knots for truncated polynomial slpine basis (s. Ngo/Wand: Smoothing with mixed model software, 2003)##########
#######################################################################################################################
#default knot placement: knots at equally spaced quantiles of the unique values of x;
#when num.knots is missing it defaults to floor(n.unique/4), clamped to [5, 35]
default.knots<-function(x,num.knots)
{
x.unique<-unique(x)
if (missing(num.knots)) num.knots<-max(5,min(floor(length(x.unique)/4),35))
#interior probabilities: equidistant grid on [0,1] with the two endpoints dropped
probs<-seq(0,1,length=num.knots+2)
probs<-probs[-c(1,num.knots+2)]
return(quantile(x.unique,probs))
}
#################################################
#create artificial poisson data##################
#################################################
#Convert right-censored survival spells for ONE competing risk into the equivalent
#artificial Poisson representation (piecewise-constant hazard trick): each spell is
#expanded over a grid of event times into {0,1} Poisson pseudo-observations with a
#log-exposure offset.
#Arguments:
#  time        - observed event/censoring time per spell
#  status.risk - 0/1 indicator that the spell ended with this risk's event
#  x           - per-spell covariate matrix; each row is replicated per grid point
#Returns: list(grid, y.list, m.list, o.list, x) with the grid, pseudo-responses,
#per-spell expansion lengths, log-exposure offsets and the inflated design matrix.
#NOTE(review): with more than 30 distinct event times the grid is drawn with
#sample(), so the result then depends on the RNG state -- confirm this is intended.
survival.to.poisson.frailty<-function(time=time, status.risk=status.risk, x=NULL)
{
event.time<-time[status.risk==1]
#specify number of integration points (not to be too large because of computation)
if (length(unique(event.time)) < 30) grid<-c(unique(sort(event.time)),max(time+1)) else grid<-c(sort(sample(unique(time[status.risk==1]),30)),max(time+1))
m<-length(grid)
#left and right neighbours of every grid point, used for the exposure lengths below
grid.minus1<-c(0,grid[1:(m-1)])
grid.plus1<-c(grid[2:m],max(time)+2)
#artificial {0,1}-poisson.data: zeros for every grid point before the spell's end,
#then the spell's own event indicator as the final pseudo-observation
yt.list<-lapply(seq(time),FUN=function(ij) c(rep(0,sum(grid<time[ij])),status.risk[ij]))
#number of pseudo-observations contributed by each spell
Mt.list<-lapply(seq(time),FUN=function(ij) sum(grid<time[ij])+1)
Mt<-unlist(Mt.list)
#inflated design.matrix: covariate row ij repeated Mt[ij] times, then all stacked
Xt<-lapply(seq(time),FUN=function(ij) kronecker(matrix(1,Mt[ij],1),t(matrix(as.matrix(x[ij,])))))
Xt<-eval(parse(text=paste("rbind(",paste("Xt[[",1:length(Xt),"]]",sep="",collapse=","),")",sep="")))
#offset: log of half the exposure length assigned to each grid interval, with both
#interval endpoints truncated at the spell's own observation time
Ot.list<-lapply(seq(time),FUN=function(ij) log(0.5*(apply(cbind(grid.plus1[1:Mt[ij]],rep(time[ij],Mt[ij])),MARGIN=1,FUN=min) - apply(cbind(grid.minus1[1:Mt[ij]],rep(time[ij],Mt[ij])),MARGIN=1,FUN=min))))
#note: at the right boundary this offset computation deviates slightly from the stated formula
return(list(grid=grid,y.list=yt.list,m.list=Mt.list,o.list=Ot.list,x=Xt))
}
############################################################
#Convex.program(via primal-dual optimization)###############
#optimize mixture weights c.m w.r.t. to constraints#########
############################################################
############################################################
# Primal-dual interior-point solver for the mixture weights c.m.
# Minimizes the penalized negative log-likelihood
#   f.0(cc) = -sum_i log(sum_m cc[m]*Delta.i.m[i,m]) + 0.5*penalty.cc*t(cc)%*%Nabla.penalty%*%cc
# subject to cc >= 0 (inequality constraints) and sum(cc) = 1 (equality).
# NOTE(review): relies on globals Delta.i.m (N x M matrix), Nabla.penalty
# (M x M penalty matrix), M, ID.unique and a generalized-inverse function
# ginverse() defined elsewhere in the file — none of these are arguments.
# Arguments:
#   penalty.cc - penalty parameter on the mixture weights
#   cc         - strictly positive start value for the weights
#   lambda     - start values for the dual variables of cc >= 0 (lambda > 0)
#   nu         - start value for the dual variable of sum(cc) = 1
# Returns: list(cc, lambda, nu) with the converged primal and dual variables.
############################################################
primal.dual<-function(penalty.cc,cc,lambda,nu)
{
###############################################
#define the objective function (= -log.lik)####
###############################################
funktion.wert<-function(cc) {-sum(apply(matrix(cc,nrow=nrow(Delta.i.m),ncol=length(cc),byrow=TRUE)*Delta.i.m,1,FUN=function(zeile) log(sum(zeile)))) + 0.5*penalty.cc*t(cc)%*%Nabla.penalty%*%(cc)}
################################
#define the residual function###
################################
residuen<-function(cc,lambda,nu) #computes the KKT residual vector at (cc, lambda, nu)
{
#Delta.i.m is a matrix with dim=c(N,M)
Delta.i.m.quer<-(1/apply(matrix(cc,nrow=nrow(Delta.i.m),ncol=length(cc),byrow=TRUE)*Delta.i.m,1,FUN=function(x) sum(x)))*Delta.i.m
Nabla.f.0<-apply(Delta.i.m.quer,2,FUN=function(x) -sum(x)) + penalty.cc*Nabla.penalty%*%cc
Nabla.2.f.0<-matrix(0,M,M);for (i in seq(ID.unique)) Nabla.2.f.0<-Nabla.2.f.0 + outer(Delta.i.m.quer[i,],Delta.i.m.quer[i,])
Nabla.2.f.0<-Nabla.2.f.0 + penalty.cc*Nabla.penalty
#derivatives of the inequality constraints written in the form ... < 0
Nabla.f.i<-diag(-1,length(cc)) #D.f=Nabla.f.i
Nabla.2.f.i.summe<-matrix(0,ncol=ncol(Nabla.2.f.0),nrow=nrow(Nabla.2.f.0))
r.dual<-Nabla.f.0+t(Nabla.f.i)%*%lambda+t(A)%*%nu
r.cent<- -diag(lambda)%*%f.i-rep(1/tt,length(lambda))
r.pri<-A%*%cc-b.constr
c(r.dual,r.cent,r.pri)
}
#########################
#start values############
#########################
mu<-1000
epsilon.feas<-1e-07;epsilon<-1e-06 #stopping criteria
m<-length(cc)
eta.dach<- t(cc)%*%lambda + 1e-08
##############################################
#parameters for backtracking line search######
##############################################
alpha.backtracking<-0.01
beta.backtracking<-0.5
###########################
#constraints###############
###########################
#inequality constraints in the form ... < 0
f.i<- -cc #inequality constraints: cc >= 0
#equality constraints
A<-rbind(rep(1,M))
b.constr<-c(1)
repeat
{
f.i<- -cc
tt<-2*mu/eta.dach
#cat("t= ",tt,"\n\n")
#####################################
#compute derivatives#################
#####################################
#represent Delta.i.m as matrix with dim=c(N,M)
Delta.i.m.quer<-(1/apply(matrix(cc,nrow=nrow(Delta.i.m),ncol=length(cc),byrow=TRUE)*Delta.i.m,1,FUN=function(x) sum(x)))*Delta.i.m
Nabla.f.0<-apply(Delta.i.m.quer,2,FUN=function(x) -sum(x)) + penalty.cc*Nabla.penalty%*%cc
Nabla.2.f.0<-matrix(0,M,M);for (i in seq(ID.unique)) Nabla.2.f.0<- Nabla.2.f.0+outer(Delta.i.m.quer[i,],Delta.i.m.quer[i,])
Nabla.2.f.0<-Nabla.2.f.0+penalty.cc*Nabla.penalty
Nabla.f.i<-diag(-1,length(cc)) #D.f=Nabla.f.i
Nabla.2.f.i.summe<-matrix(0,ncol=ncol(Nabla.2.f.0),nrow=nrow(Nabla.2.f.0))
############################
#residual vectors###########
############################
r.dual<-Nabla.f.0+t(Nabla.f.i)%*%lambda+t(A)%*%nu
r.cent<- -diag(lambda)%*%f.i-rep(1/tt,length(lambda))
r.pri<-A%*%cc-b.constr
#cat("r.dual= ",sqrt(sum(r.dual^2)),"\n")
#cat("r.cent= ",sqrt(sum(r.cent^2)),"\n")
#cat("r.pri= ",sqrt(sum(r.pri^2)),"\n")
############################
#value of the objective f.0#
############################
#NOTE(review): f.0 is computed but not used below — presumably kept for debugging
f.0<-funktion.wert(cc)
#############################################
#solve the KKT system by block elimination###
#############################################
#computation of delta.cc and delta.nu (see Harville, p.468)
H<-Nabla.2.f.0+Nabla.2.f.i.summe+diag(lambda/cc)
#H<-Nabla.2.f.0+Nabla.2.f.i.summe-t(Nabla.f.i)%*%qr.solve(diag(f.i))%*%diag(lambda)%*%Nabla.f.i
b<- -(r.dual+t(Nabla.f.i)%*%ginverse(diag(f.i),tol=1e-100)%*%r.cent)
d<- -r.pri
#k<-1000
#W<-ginverse(H+k*t(A)%*%A,tol=1e-100) #here U=Id; for stability one could use e.g. U=k*Id with k=1000
W<-qr.solve(H+t(A)%*%A,tol=1e-100)
TT<-ginverse(A%*%W%*%t(A),tol=1e-100)%*%(A%*%W%*%b-d)
#delta.nu<-as.vector(TT+k*d)
delta.nu<-as.vector(TT+d)
delta.cc<-as.vector(W%*%b-W%*%t(A)%*%TT)
delta.lambda<-as.vector(-ginverse(diag(f.i),tol=1e-100)%*%diag(lambda)%*%Nabla.f.i%*%delta.cc+ginverse(diag(f.i),tol=1e-100)%*%r.cent)
#cat("delta.lambda= ",delta.lambda,"\n")
#cat("delta.nu= ",delta.nu,"\n")
#cat("delta.cc= ",delta.cc,"\n\n")
#NOTE(review): sqrt(sum(r.dual^2)) is tested twice here; one occurrence is
#redundant — presumably harmless since the surrogate gap eta.dach covers
#centrality, but confirm against the intended stopping rule
if (sqrt(sum(r.pri^2)) < epsilon.feas & sqrt(sum(r.dual^2)) < epsilon.feas & sqrt(sum(r.dual^2)) < epsilon.feas & eta.dach < epsilon) break
##################
#line search######
##################
s<-1
#first shrink the step until cc stays strictly feasible, then backtrack on the residual norm
while (any(cc+s*delta.cc < 0)) s<-s*beta.backtracking
while (sqrt(sum(residuen(cc+s*delta.cc,lambda+s*delta.lambda,nu+s*delta.nu)^2)) > (1-alpha.backtracking*s)*sqrt(sum(residuen(cc,lambda,nu)^2))) s<-s*beta.backtracking
#cat("s= ",s, "\n\n")
#######################
#updates###############
#######################
lambda<-abs(lambda+s*delta.lambda) #technical safeguard, since in theory lambda > 0 holds
nu<-nu+s*delta.nu
cc<-cc+s*delta.cc
#cat("lambda= ",lambda,"\n")
#cat("nu= ",nu,"\n")
#cat("cc= ",cc,"\n")
eta.dach<- t(cc)%*%lambda + 1e-12
#cat("eta.dach= ",eta.dach,"\n\n")
} #end repeat
list(cc=as.vector(cc),lambda=as.vector(lambda),nu=as.vector(nu))
} #end of function primal.dual()
#######################################################################################
#######################################################################################
#######################################################################################
#######################################################################################
#######################################################################################
#######################################################################################
#######################################################################################
#######################################################################################
#basic dimensions of the clustered data set
#(the first column of data.set is assumed to hold the cluster identifier)
names(data.set)[1]<-"ID"
N<-length(data.set[,1]) #number of observations
N.Cluster<-length(unique(data.set$ID)) #number of clusters
ni<-table(data.set$ID) #number of observations in each cluster
ID.unique<-unique(data.set$ID)
names(ID.unique)<-1:length(ID.unique) #positions 1..N.Cluster, used as names for later indexing
#################################################################
#define for each risk type the spline knots for spline bases#####
#################################################################
# One knot vector per competing risk; the risk indicator columns of data.set
# are identified by the substring "status" in their names. If the control list
# supplies num.knots it is used per risk, otherwise default.knots() chooses
# the number of knots from the event times.
L<-length(grep("status",names(data.set),fixed=TRUE)) #number of competing risks
Status.names<-names(data.set)[grep("status",names(data.set),fixed=TRUE)]
knots.t<-vector("list",L)
for (l in seq_len(L))
{
#event times of risk l; data.set[[...]] replaces the former eval(parse(text=...)) lookup
event.times.l<-data.set$time[data.set[[Status.names[l]]]==1]
#exact name check (the former regex grep could match unrelated control entries)
if (!("num.knots" %in% names(control))) knots.t[[l]]<-default.knots(event.times.l)
else knots.t[[l]]<-default.knots(event.times.l,control$num.knots[l])
}
K.t<-lapply(knots.t,length) #number of knots per risk
############################################################
#indicator matrix with elements d.ijl as values#############
############################################################
#one 0/1 event-indicator column per competing risk
D.ijl<-data.set[,grep("status",names(data.set),fix=TRUE),drop=FALSE]
##############################################################################################################
#create a model.matrix for (co)variables######################################################################
#N.B.!!! the reference categories for factors should be defined in the analysed data set before use##########
##############################################################################################################
#with an empty formula only an intercept column is kept; otherwise the model
#matrix is built from the term labels of the supplied formula "form"
if (length(attributes(terms(form))$term.labels) == 0)
model.matrix.x<-model.matrix(~numeric(nrow(data.set)))[,1,drop=FALSE] else
model.matrix.x<-model.matrix(formula(paste("~",paste(attributes(terms(form))$term.labels,collapse="+"))),data=data.set)
#########################################################################
#artificial poisson data (for each competing risk)#######################
#########################################################################
# Expand the survival data once per competing risk into the artificial
# Poisson representation produced by survival.to.poisson.frailty().
grid.points.list<-list() #t-points at which the integral is approximated
y.poisson.list<-list() #artificial {0,1} poisson data
m.list<-list() #number of poisson data per observation
Design.variables<-list() #inflated covariates matrix "model.matrix.x"
offset.list<-list() #offset parameter
for (l in seq_len(L))
{
#data.set[[Status.names[l]]] replaces the former eval(parse(text=...)) lookup
help.surv<-survival.to.poisson.frailty(time=data.set$time,status.risk=data.set[[Status.names[l]]],x=model.matrix.x)
grid.points.list[[l]]<-help.surv$grid
y.poisson.list[[l]]<-help.surv$y.list
m.list[[l]]<-help.surv$m.list
offset.list[[l]]<-help.surv$o.list
Design.variables[[l]]<-help.surv$x
colnames(Design.variables[[l]])<-colnames(model.matrix.x)
colnames(Design.variables[[l]])[1]<-"baseline"
}
#number of variables (including factor levels of factor variables) in the design matrix
p<-ncol(Design.variables[[1]])-1
#survival times for the expanded data set: per observation all grid points
#strictly below the observed time, followed by the observed time itself
#(sapply kept deliberately: its possible matrix simplification unlists in the
#same column-major order as the list form, and Z.time.list may be used later)
Z.time.list<-lapply(seq_len(L),FUN=function(l) sapply(data.set$time,FUN=function(x) c(grid.points.list[[l]][x>grid.points.list[[l]]],x)))
Z.time<-lapply(seq_len(L),FUN=function(l) unlist(Z.time.list[[l]]))
#############################################################################################
#############################################################################################
#define the start values for beta parameter by means of glm regression for y.poisson#########
#############################################################################################
#############################################################################################
#Per risk, a Poisson GLM on the artificial data (with the interval-length
#offset) provides start values: intercept and time slope for the baseline and
#for each covariate (interaction with Z.time). The model formula is assembled
#as a string and converted with as.formula().
if (p > 0)
{
#for covariates: covariate columns multiplied by the expanded time (time-varying part)
variables.time<-lapply(1:L,FUN=function(l) Design.variables[[l]][,2:ncol(Design.variables[[l]]),drop=FALSE]*Z.time[[l]])
for (l in 1:L) colnames(variables.time[[l]])<-paste("variables.time.",colnames(Design.variables[[l]])[2:ncol(Design.variables[[l]])],sep="")
beta.start<-lapply(1:L,FUN=function(l) coef(glm(as.formula(paste("unlist(y.poisson.list[[l]])~",paste(c("Z.time[[l]]",paste(colnames(Design.variables[[l]])[2:ncol(Design.variables[[l]])],sep="",collapse="+"),paste(colnames(variables.time[[l]]),sep="",collapse="+"),paste("offset(unlist(offset.list[[",l,"]]))",sep="")),collapse="+"))),data=data.frame(Design.variables[[l]],variables.time[[l]]),family=poisson)))
for (l in 1:L) names(beta.start[[l]])<-c(paste(c("beta.t.intercept.Baseline.","beta.t.slope.Baseline."),"Risk.",l,sep=""),paste("beta.t.intercept.",colnames(Design.variables[[l]])[2:ncol(Design.variables[[l]])],".Risk.",l,sep=""),paste("beta.t.slope.",colnames(Design.variables[[l]])[2:ncol(Design.variables[[l]])],".Risk.",l,sep=""))
} else
{
#no covariates: baseline intercept and time slope only
beta.start<-lapply(1:L,FUN=function(l) coef(glm(as.formula(paste("unlist(y.poisson.list[[l]])~",paste(c("Z.time[[l]]","offset(unlist(offset.list[[l]]))"),collapse="+"),sep="")),data=data.frame(Design.variables[[l]]),family=poisson)))
for (l in 1:L) names(beta.start[[l]])[1:2]<-c(paste("beta.t.intercept.Baseline.Risk.",l,sep=""),paste("beta.t.slope.Baseline.Risk.",l,sep=""))
}
#coefficients at Baseline
beta.t.intercept.Baseline<-lapply(1:L,FUN=function(l) beta.start[[l]][1]) #for Intercept
beta.t.slope.Baseline<-lapply(1:L,FUN=function(l) beta.start[[l]][2]) #for t-Trend
#coefficients at covariables
#reorder the fitted GLM coefficients into per-covariate lists:
#positions 3..(2+p) are covariate intercepts, (3+p)..(2+2p) their time slopes
beta.t.intercept.list<-lapply(1:L,FUN=function(l) list())
beta.t.slope.list<-lapply(1:L,FUN=function(l) list())
for (l in 1:L)
{
beta.t.intercept.list[[l]][[1]]<-beta.t.intercept.Baseline[[l]]
beta.t.slope.list[[l]][[1]]<-beta.t.slope.Baseline[[l]]
if (p >0)
{
for (i in 3:(2+p)) beta.t.intercept.list[[l]][[i-1]]<-beta.start[[l]][i]
for (i in (2+p+1):(2+2*p)) beta.t.slope.list[[l]][[i-(1+p)]]<-beta.start[[l]][i]
}
}
#flatten to one named vector per risk
beta.t.intercept<-list()
beta.t.slope<-list()
for (l in 1:L)
{
beta.t.intercept[[l]]<-unlist(beta.t.intercept.list[[l]])
beta.t.slope[[l]]<-unlist(beta.t.slope.list[[l]])
}
############################################################################
#initialize penalty parameters through variances of random effects##########
############################################################################
#one variance (=1/penalty) per varying coefficient (baseline + covariates)
#note: 10e-1 equals 1 — presumably intended as a neutral start value
variance.penalty.t<-list()
for (l in 1:L)
{
if (p >0)
{
variance.penalty.t[[l]]<-rep(10e-1,p+1)
names(variance.penalty.t[[l]])<-c(paste("variance.t.Baseline.Risk.",l,sep=""),paste(paste("variance.t.",colnames(Design.variables[[l]])[2:ncol(Design.variables[[l]])],sep=""),".Risk.",l,sep=""))
} else
{
variance.penalty.t[[l]]<-10e-1
names(variance.penalty.t[[l]])<-paste("variance.t.Baseline.Risk.",l,sep="")
}
}
###############################################
#create penalty matrices#######################
###############################################
#Lambda.t: block-diagonal ridge penalty — per coefficient two zeros (fixed
#intercept and slope are unpenalized) followed by K.t knots penalized with
#1/variance; D.t is the corresponding 0/1 selector of the random parts
Lambda.t<-lapply(1:L,FUN=function(l) eval(parse(text=paste("diag(c(",paste(paste("0,0,rep(1/variance.penalty.t[[l]][",1:length(variance.penalty.t[[l]]),sep=""),"],K.t[[l]])",sep="",collapse=","),"))",sep=""))))
D.t <-lapply(1:L,FUN=function(l) diag(rep(c(0,0,rep(1,K.t[[l]])),p+1)))
############################################
#initialize random effects##################
############################################
#spline (random) coefficients start at zero, one vector of length K.t per
#varying coefficient (baseline + each covariate), per risk
u.t<-list()
for (l in 1:L)
{
u.t.list<-list()
if (p > 0)
{
u.t.list[[1]]<-rep(0,K.t[[l]]); names(u.t.list[[1]])<-paste("u.t.Baseline.",1:K.t[[l]],".Risk.",l,sep="")
for (i in 1:p)
{u.t.list[[i+1]]<-rep(0,K.t[[l]])
names(u.t.list[[i+1]])<-paste("u.t.",rep(colnames(Design.variables[[l]])[i+1],K.t[[l]]),".",1:K.t[[l]],".Risk.",l,sep="")
}
} else
{
u.t.list[[1]]<-rep(0,K.t[[l]])
names(u.t.list[[1]])<-paste("u.t.Baseline.",1:K.t[[l]],".Risk.",l,sep="")
}
u.t[[l]]<-u.t.list
}
####################################
#combine in theta vector############
####################################
#per risk: (intercept, slope, K.t spline coefficients) for the baseline and
#then for each covariate, concatenated into one parameter vector teta[[l]]
teta.t<-list()
teta<-list()
for (l in 1:L)
{
teta.t.Baseline<-c(beta.t.intercept.list[[l]][[1]],beta.t.slope.list[[l]][[1]],u.t[[l]][[1]])
teta.t.list<-list()
teta.t.list[[1]]<-teta.t.Baseline
if (p >0) for (i in 1:p) teta.t.list[[i+1]]<-c(beta.t.intercept.list[[l]][[i+1]],beta.t.slope.list[[l]][[i+1]],u.t[[l]][[i+1]])
teta.t[[l]]<-unlist(teta.t.list)
teta[[l]]<-c(teta.t[[l]])
}
########################################################################
########################################################################
#construct design matrices for the model################################
########################################################################
########################################################################
#per risk: truncated-linear spline basis (Z.time - knot)_+ plus columns
#(1, Z.time, basis) replicated for every covariate (varying coefficients)
Design.matrix.t.list<-list()
Basis.t.list<-list()
variables.t<-list()
for (l in 1:L)
{
#truncated polynomial basis: positive part of (time - knot)
Basis.t.list[[l]]<-outer(Z.time[[l]],knots.t[[l]],FUN="-")
Basis.t.list[[l]]<-Basis.t.list[[l]]*(Basis.t.list[[l]]>0)
if ( p > 0)
{
variables.t[[l]]<-Design.variables[[l]][,2:ncol(Design.variables[[l]]),drop=FALSE]
#the eval(parse(...)) builds cbind(x1*(1,t,basis), x2*(1,t,basis), ...) for the p covariates
Design.matrix.t.list[[l]]<-cbind(1,Z.time[[l]],Basis.t.list[[l]],eval(parse(text=paste("cbind(",paste("variables.t[[l]][,",1:p,"]*cbind(1,Z.time[[l]],Basis.t.list[[l]])",sep="",collapse=","),")"))))
} else
Design.matrix.t.list[[l]]<-cbind(1,Z.time[[l]],Basis.t.list[[l]])
}
########################################################################################################################
########################################################################################################################
########################################################################################################################
########################################################################################################################
################# EM-Algorithm ######################################################################################
########################################################################################################################
########################################################################################################################
########################################################################################################################
########################################################################################################################
########################################################################
#Initialize parameters of mixture distributions for frailties###########
########################################################################
M.list<-list() #number M.1,...,M.L of grid functions on the grid M.1.x...x.M.L
mu.list<-list() #means of Gamma mixture densities =1
sigma.quadrat.l.ml<-list() #variances of the Gamma mixture densities (fixed grid below)
alpha.l.ml<-list() #shape parameter of gamma mixture densities
beta.l.ml<-list() #scale parameter of gamma mixture densities
for (l in 1:L)
{
M.list[[l]]<-7
mu.list[[l]]<-rep(1,M.list[[l]])
sigma.quadrat.l.ml[[l]]<-c(0.001,0.05,0.15,0.4,0.6,0.8,0.99)^2
alpha.l.ml[[l]]<-1/sigma.quadrat.l.ml[[l]]
beta.l.ml[[l]]<-alpha.l.ml[[l]] #alpha = beta, consistent with component means of 1 (mu.list) if beta acts as a rate — confirm parameterization
}
M<-prod(unlist(M.list)) #total number of mixture components on the product grid
#set the vector of multiindices (expand to array) to order the mixture coefficients c.m
index.matrix<-expand.grid(eval(parse(text=paste("list(",paste("Column.",1:L,"=1:M.list[[",1:L,"]]",sep="",collapse=","),")"))))
#expand shape and scale parameters accordingly (one row per multi-index, one column per risk)
alpha.l.ml.matrix<-as.matrix(expand.grid(eval(parse(text=paste("list(",paste("Column.",1:L,"=alpha.l.ml[[",1:L,"]]",sep="",collapse=","),")")))))
beta.l.ml.matrix<-as.matrix(expand.grid(eval(parse(text=paste("list(",paste("Column.",1:L,"=beta.l.ml[[",1:L,"]]",sep="",collapse=","),")")))))
##############################################################################################################
#define Penalty Matrix on mixture weights c.m (marginal weights are being penalized)##########################
##############################################################################################################
#diagonal penalty: weight of each component grows with the (summed, squared) component variances
sigma.quadrat.l.ml.matrix<-as.matrix(expand.grid(eval(parse(text=paste("list(",paste("Column.",1:L,"=sigma.quadrat.l.ml[[",1:L,"]]",sep="",collapse=","),")")))))
sigma.penalty<-apply(sigma.quadrat.l.ml.matrix,1,sum)
Nabla.penalty<-diag(sigma.penalty^2)
################################################################################
################################################################################
#define penalty sequence for penalty parameter on mixture weights c.m###########
################################################################################
################################################################################
penalty.sequence<-c(0.001,0.1,1,10,100,1000,5000,10000,100000)
#penalty.sequence<-c(0.001,0.1)
#####################################################################################################################
#initialize list of frailty values for stopping of the EM-algorithm for each given value of penalty.sequence#########
#####################################################################################################################
w.list<-list()
#########################################################################################
#initialize vectors of AIC, df.weights and log.lik.margin values,########################
#to define an optimum from them w.r.t. the penalty sequence#############################
#########################################################################################
aic.vector<-c()
degree.freedom.weights.vector<-c()
log.lik.margin.vector<-c()
aic.and.log.lik.and.df.EM<-list()
##################################################################################################################
#initialize lists for the estimated parameters of the model,######################################################
#each element of a list corresponds to a penalty value from the penalty sequence##################################
##################################################################################################################
cc.list<-list() #mixture weights
#one list per risk, created by name via assign(): alpha.t.risk.<l>.list
for (l in 1:L) assign(paste("alpha.t.risk.",l,".list",sep=""),vector(mode="list",length=length(penalty.sequence))) #varying coefficients
deviation.t.list<-list() #deviation
frailty.estimate.list<-list() #frailties
##################################################################################################################
#initialize lists for resulting estimates of fixed and random components of varying coefficients of the model#####
#as well as penalty values for their random parts,################################################################
#each element of a list corresponds to a penalty value from the penalty sequence##################################
##################################################################################################################
estimates.fix<-list()
estimates.random<-list()
penalty.t.list<-list()
##################################################################################################################
#initialize lists for resulting estimates of degrees.of.freedom for varying coefficients##########################
##################################################################################################################
#df.varying<-list()
##############################################################
##############################################################
#loop for penalty parameter from penalty sequence#############
##############################################################
##############################################################
for (penalty.cc in penalty.sequence)
{
if (control$print.penalty.mixture) cat("\n\n","penalty mixture =",penalty.cc,"\n\n")
if (control$print.estimates)
{
cat("\n\n","start values: fixed parameters of the varying coefficients","\n\n")
print(unlist(beta.t.intercept.list))
print(unlist(beta.t.slope.list))
cat("","\n\n")
#print(penalty)
}
#################################
#initialize following values#####
#################################
help.aic.and.log.lik.and.df.EM<-NULL #writes out (in matrix form) the values of aic, marginal log.lik and df's from each EM iteration
w.list<-list() #writes out frailties from each EM iteration for comparision for stopping criterion
w.list[[1]]<-lapply(1:L,FUN=function(l) lapply(seq(unique(data.set$ID)),FUN=function(i) 1 ))
log.lik.margin<-c() #marginal log likelihood of the model (given penalty values from the penalty sequence)
variance.em<-list() #writes out the variances(=1/penalty) of random components of varying coefficients (from each EM iteration)
variance.em[[1]]<-unlist(variance.penalty.t)
##########################################################
#start values for the convex optimization routine######### (can be defined out of the penalty.cc loop)
##########################################################
cc<-rep(1,M) #has to be "strong" feasible, i.e. fulfills i.constraints x >0 (but not necessaraly e.constraints Ax=b)
lambda<-rep(1,length(cc)) #fulfills lambda >0 (= number of i.constraints)
nu<-1 #can be of any initial value (= number of e.constraints)
###############################################################
###############################################################
######################EM.loop##################################
###############################################################
###############################################################
for (em in 1:control$niter.EM)
{
if (control$print.EM) cat("\n\n","EM.iteration =",em,"\n\n")
#####################################
############E-step###################
#####################################
##########################################################################
#calculate ingredients of the marginal density of poisson data############
##########################################################################
#lambda.ijl (updated from each M-step of EM-algorithm)
lambda.ijl.list<-list()
lambda.ijl.K.ij.list<-list()
lambda.sum<-matrix(0,nrow=length(seq(ID.unique)),ncol=L)
for (l in 1:L)
{
#build vector lambda.ijlk, k=1...K.ij for all ij; for each given risik l
lambda.ijl<-exp(Design.matrix.t.list[[l]]%*%teta[[l]]+unlist(offset.list[[l]]))
#for each cluster i define aggregated number of poisson data (through all spells j=1...ni)
help.lapply<-lapply(ID.unique,FUN=function(i) {help.number<-cumsum(unlist(m.list[[l]][data.set$ID==i]));help.number[length(help.number)]})
#indiziere geeignet fr jedes cluster
help.cut<-cumsum(unlist(help.lapply))
help.matrix<-cbind(c(0,help.cut[-length(help.cut)])+1,help.cut) #bildet die indizes.matrix fr das jeweilige individuum
indizes<-lapply(seq(ID.unique),FUN=function(i) help.matrix[i,1]:help.matrix[i,2]) #jeweilige index.vektoren
#zum i-ten Cluster zugeh�iger vektor der lambda.ijl
lambda.ijl.list[[l]]<-lapply(indizes,FUN=function(ind) lambda.ijl[ind])
#summierte lambda ber j und K fr jedes t-te Cluster
lambda.sum[,l]<-sapply(lambda.ijl.list[[l]],FUN=function(x) sum(x))
#die k=K.ij bei jeweiligen spells des i-ten individuums
lambda.ijl.K.ij<-lambda.ijl[cumsum(unlist(m.list[[l]]))]
#die zum i-ten Cluster geh�en, zusamen.gefasst
lambda.ijl.K.ij.list[[l]]<-lapply(ID.unique,FUN=function(i) lambda.ijl.K.ij[data.set$ID==i])
}
lambda.prod<-unlist(lapply(seq(ID.unique),FUN=function(i) {product.l<-c(); for (l in 1:L) product.l[l]<-prod(lambda.ijl.K.ij.list[[l]][[i]]^D.ijl[data.set$ID==as.numeric(ID.unique[names(ID.unique)==i]),l]);prod(product.l)}))
#updated shape and scale parameters of gamma distributions
alpha.l.ml.given.delta.i<-lapply(ID.unique,FUN=function(i) {help.matrix<-c();for (l in 1:L) {help.matrix<-cbind(help.matrix,sum(D.ijl[data.set$ID==i,l])+alpha.l.ml.matrix[,l])};help.matrix})
beta.l.ml.given.delta.i<-lapply(seq(ID.unique),FUN=function(i) {help.matrix<-c();for (l in 1:L) {help.matrix<-cbind(help.matrix,lambda.sum[i,l]+beta.l.ml.matrix[,l])};help.matrix})
#expand to the 3.dim array
alpha.l.ml.expand.to.array<-array(alpha.l.ml.matrix,c(M,L,length(seq(ID.unique))))
alpha.l.ml.given.delta.i.expand.to.array<-array(unlist(lapply(seq(ID.unique),FUN=function(i) alpha.l.ml.given.delta.i[[i]])),c(M,L,length(seq(ID.unique))))
beta.l.ml.expand.to.array<-array(beta.l.ml.matrix,c(M,L,length(seq(ID.unique))))
beta.l.ml.given.delta.i.expand.to.array<-array(unlist(lapply(seq(ID.unique),FUN=function(i) beta.l.ml.given.delta.i[[i]])),c(M,L,length(seq(ID.unique))))
#calculate ratio of gamma functions durch stirling.approximation
#(some neglegible differences between stirling approximation and direct calculation)
#through stirling approximation
#gamma.ratio<-ifelse(alpha.l.ml.given.delta.i.expand.to.array > 10 & alpha.l.ml.expand.to.array > 10,gamma.stirling.ratio(alpha.l.ml.given.delta.i.expand.to.array,alpha.l.ml.expand.to.array),gamma(alpha.l.ml.given.delta.i.expand.to.array)/gamma(alpha.l.ml.expand.to.array))
#direct through Gamma(z+1)=z*Gamma(z)
d.array<-alpha.l.ml.given.delta.i.expand.to.array-alpha.l.ml.expand.to.array
gamma.ratio<-array(apply(cbind(seq(alpha.l.ml.expand.to.array)),1,FUN=function(i) prod(alpha.l.ml.expand.to.array[i]:(alpha.l.ml.expand.to.array[i]+d.array[i]-1))),dim=dim(alpha.l.ml.expand.to.array))
gamma.ratio<-ifelse(d.array==0,1,gamma.ratio)
oben<-alpha.l.ml.expand.to.array*log(beta.l.ml.expand.to.array)
unten<-alpha.l.ml.given.delta.i.expand.to.array*log(beta.l.ml.given.delta.i.expand.to.array)
exp.ratio<-exp(oben-unten)
exp.gamma<-exp.ratio*gamma.ratio
exp.gamma.prod<-t(apply(exp.gamma,c(1,3),"prod"))
##########################################################
#combine to the matrix Delta (i-->rows,m-->columns)#######
##########################################################
Delta.i.m<-exp.gamma.prod*matrix(lambda.prod,ncol=ncol(exp.gamma.prod),nrow=nrow(exp.gamma.prod),byrow=FALSE)
############################
#optimal weights c.m########
############################
#(in the next EM iteration the first value will be set nearby that one already calculated from the preceeding iteration, to reduce computation)
cc<-primal.dual(penalty.cc,as.vector(cc+1e-05),lambda,nu)$cc
######################################################
#marginal weights c.(l)ml#############################
######################################################
Delta.i.m.quer<-(1/apply(matrix(cc,nrow=nrow(Delta.i.m),ncol=length(cc),byrow=TRUE)*Delta.i.m,1,FUN=function(x) sum(x)))*Delta.i.m
cc.tilde.i<-matrix(cc,nrow=nrow(Delta.i.m),ncol=length(cc),byrow=TRUE)*Delta.i.m.quer
c.l.ml.i<-lapply(seq(ID.unique),FUN=function(i) lapply(1:L,FUN=function(l) lapply(1:M.list[[l]],FUN=function(m.l) sum(cc.tilde.i[i,index.matrix[,l]==m.l]))))
##########################################################
#define the degrees of freedom from proposed relation#####
##########################################################
Nabla.2.f.0<-matrix(0,M,M);for (i in seq(ID.unique)) Nabla.2.f.0<- Nabla.2.f.0+outer(Delta.i.m.quer[i,],Delta.i.m.quer[i,])
I<- Nabla.2.f.0 #not penalized fisher information (here implicit multiplied with "-1", because Nabla.2.f.0 calculated for -log.lik)
H<- Nabla.2.f.0+penalty.cc*Nabla.penalty #penalized fisher information
degree.freedom.weights<-sum(diag(qr.solve(H,tol=1e-100)%*%I))
#marginal log.lik will be calculated later; hence aic=-log.lik.margin+degree.freedom.weights
############################
#frailties estimates########
############################
w<-lapply(1:L,FUN=function(l) lapply(seq(unique(data.set$ID)),FUN=function(i) sum(unlist(c.l.ml.i[[i]][[l]])*unique(alpha.l.ml.given.delta.i[[i]][,l])/unique(beta.l.ml.given.delta.i[[i]][,l]))))
w.list[[em+1]]<-w
difference.frailty<-c(); for (l in 1:L) difference.frailty[l]<-sum((unlist(w.list[[em+1]][[l]])-unlist(w.list[[em]][[l]]))^2)
#cat("sum(w.alt-w.neu)^2= ",sum(abs.differenz.frailty),"\n\n")
#########################################################
#####################End of E-step of EM#################
#########################################################
#update the offset parameter of the model
#(depending on the value of frailty parameter w)
#this builds the link between frailty and theta
help.lapply<-list() #for each cluster i define aggregated number of poisson data(through all spells j=1...ni)
for (l in 1:L) help.lapply[[l]]<-unlist(lapply(ID.unique,FUN=function(i) {help.number<-cumsum(unlist(m.list[[l]][data.set$ID==i]));help.number[length(help.number)]}))
#for each risk l a vector of frailty values
offset.frailty<-lapply(1:L,FUN=function(l) unlist(offset.list[[l]]) + rep(log(unlist(w[[l]])),help.lapply[[l]]))
###########################################
############# M-step of EM ################
###########################################
##################################################################################################################
#approximate penalized fisher.type matrix (for frailty=1), to calculate approx. marginal log likilehood###########
##################################################################################################################
I.t.frailty.1<-list()
index.vector<-list()
for (l in 1:L)
{index.vector[[l]]<-(2+1):(2+K.t[[l]])
if (p > 0) for (k in 1:p) index.vector[[l]]<-c(index.vector[[l]],(2+K.t[[l]]+2*k+1+(k-1)*K.t[[l]]):(2+K.t[[l]]+2*k+k*K.t[[l]]))
}
variance.epoch<-list() #writes out the variances(=1/penalty) of random components of varying coefficients (from each epoch iteration)
#############################################################
#fr jeden Ausfall.typ l=1...L###############################
#############################################################
for (l in 1:L)
{
variance.epoch.l<-list() #initialize list of variances(=1/lambda); each element of this list is a value from current epoch
variance.epoch.l[[1]]<-unlist(variance.penalty.t[[l]])
for (epoch in 1:control$niter.epoch)
{
#penalized score for theta.t
S.t<-crossprod(Design.matrix.t.list[[l]],as.vector(unlist(y.poisson.list[[l]])-exp(Design.matrix.t.list[[l]]%*%teta.t[[l]]+unlist(offset.frailty[[l]]))))
S.t.pen<-S.t-Lambda.t[[l]]%*%D.t[[l]]%*%teta.t[[l]]
#penalized observed fisher.type matrix(multipliced with "-1")
I.t<-crossprod(Design.matrix.t.list[[l]],Design.matrix.t.list[[l]]*as.vector(exp(Design.matrix.t.list[[l]]%*%teta.t[[l]]+unlist(offset.frailty[[l]]))))
I.t.pen<-I.t+Lambda.t[[l]]%*%D.t[[l]]
##########################################################################################
#approximation of penalised fisher.type matrix(with frailties=1)##########################
##########################################################################################
I.t.frailty.1[[l]]<-crossprod(Design.matrix.t.list[[l]],Design.matrix.t.list[[l]]*as.vector(exp(Design.matrix.t.list[[l]]%*%teta.t[[l]]+unlist(offset.list[[l]]))))
I.t.frailty.1[[l]]<-I.t.frailty.1[[l]][index.vector[[l]],index.vector[[l]]] + Lambda.t[[l]][index.vector[[l]],index.vector[[l]]]
#update theta.t
inverse.t<-qr.solve(I.t.pen,tol=1e-100)
teta.t.old<-teta.t[[l]]
teta.t.new<-teta.t.old+inverse.t%*%S.t.pen
names(teta.t.new)<-names(teta.t[[l]])
teta.t[[l]]<-teta.t.new
names(teta.t[[l]])<-names(teta.t.new)
teta[[l]]<-c(teta.t[[l]])
#update fixed components beta
beta.t.intercept[[l]]<-teta.t[[l]][grep("beta.t.intercept",names(teta.t[[l]]),fixed=TRUE)]
beta.t.slope[[l]]<-teta.t[[l]][grep("beta.t.slope",names(teta.t[[l]]),fixed=TRUE)]
#update random components u
u.t[[l]]<-teta.t[[l]][grep("u.t",names(teta.t[[l]]),fixed=TRUE)]
#########################################################################
#fixpoint iteration for update of lambda penalties#######################
#just one update w.r.t lambda=F(lambda)##################################
#########################################################################
#old values of variances(=1/lambda)
variance.penalty.t.old<-variance.penalty.t[[l]]
variance.penalty.old<-c(variance.penalty.t.old)
#define first the submatrices of the (penalized) fisher.information, correspondig to the random coefficients
index.vector.t<-(2+1):(2+K.t[[l]]) #for u.t.Baseline
if (p > 0) for (k in 1:p) index.vector.t<-c(index.vector.t,(2+K.t[[l]]+2*k+1+(k-1)*K.t[[l]]):(2+K.t[[l]]+2*k+k*K.t[[l]]))
I.t.pen.u.t<-I.t.pen[index.vector.t,index.vector.t] #penalizied fisher.type matrix for t
inverse.I.t.pen.u.t<-qr.solve(I.t.pen.u.t,tol=1e-100)
#update of penalties
variance.penalty.t.new<-c()
#baseline
variance.penalty.t.new[1]<-(sum(diag(inverse.I.t.pen.u.t[1:K.t[[l]],1:K.t[[l]]]))+sum(u.t[[l]][grep("Baseline",names(u.t[[l]]),fixed=TRUE)]^2))/K.t[[l]]
#covariates
if (p > 0) for (k in 1:p)
variance.penalty.t.new[k+1]<-(sum(diag(inverse.I.t.pen.u.t[(k*K.t[[l]]+1):((k+1)*K.t[[l]]),(k*K.t[[l]]+1):((k+1)*K.t[[l]])]))+sum(u.t[[l]][(k*K.t[[l]]+1):((k+1)*K.t[[l]])]^2))/K.t[[l]]
Lambda.t[[l]]<-eval(parse(text=paste("diag(c(",paste(paste("0,0,rep(1/variance.penalty.t.new[",1:length(variance.penalty.t.new),sep=""),"],K.t[[l]])",sep="",collapse=","),"))",sep="")))
variance.penalty.t[[l]]<-variance.penalty.t.new
variance.epoch.l[[epoch+1]]<-variance.penalty.t[[l]]
#stoping rule
if (sum((teta.t.new-teta.t.old)^2) < control$tol.epoch & sum((variance.epoch.l[[epoch+1]]-variance.epoch.l[[epoch]])^2) < control$tol.variance) break
} #end for (epoch in 1:niter.epoch)
variance.epoch[[l]]<-variance.epoch.l[[epoch+1]]
#degrees.of.freedom for varying coefficients
#assign(paste("df.",l,".Baseline",sep=""),sum(diag((qr.solve(I.t.pen,tol=1e-100)%*%I.t)[1:(2+K.t[[l]]),1:(2+K.t[[l]])])))
#if (p > 0) for (k in 1:p)
#{assign(paste("df.",l,".",colnames(Design.variables[[l]])[k+1],sep=""),sum(diag((qr.solve(I.t.pen,tol=1e-100)%*%I.t)[(2+K.t[[l]]+2*(k-1)+(k-1)*K.t[[l]]+1):(2+K.t[[l]]+2*k+k*K.t[[l]]),(2+K.t[[l]]+2*(k-1)+(k-1)*K.t[[l]]+1):(2+K.t[[l]]+2*k+k*K.t[[l]])])))
#}
} #end for (l in 1:L)
#################################################################################################
#marginal log likelihood (all random components integrated out from the model)###################
#(s. supplemented paper)#########################################################################
#################################################################################################
I.part<-sum(apply(matrix(cc,nrow=nrow(Delta.i.m),ncol=length(cc),byrow=T)*Delta.i.m,1,FUN=function(zeile) log(sum(zeile))))
II.part<-0.5*sum(sapply(1:L,FUN=function(l) as.vector(-t(u.t[[l]])%*%Lambda.t[[l]][index.vector[[l]],index.vector[[l]]]%*%u.t[[l]]) + sum(log(eigen(Lambda.t[[l]][index.vector[[l]],index.vector[[l]]],only.values=TRUE)$values)) - sum(log(eigen(I.t.frailty.1[[l]],only.values=TRUE)$values))))
log.lik.margin<-c(log.lik.margin,I.part+II.part)
if (control$print.log.lik) cat("\n\n","log.lik.margin =",I.part+II.part,"\n\n")
#AIC of the model
aic<- -(I.part+II.part)+degree.freedom.weights
#save the values of AIC, df and marginal log.lik from each EM iteration
help.aic.and.log.lik.and.df.EM<-rbind(help.aic.and.log.lik.and.df.EM,c(aic,degree.freedom.weights,I.part+II.part))
aic.and.log.lik.and.df.EM[[seq(penalty.sequence)[penalty.sequence==penalty.cc]]]<-help.aic.and.log.lik.and.df.EM
#stopping rule for EM iteration
variance.em[[em+1]]<-unlist(variance.epoch)
if (sum((teta.t.new-teta.t.old)^2) < control$tol.epoch & sum((variance.em[[em+1]]-variance.em[[em]])^2) < control$tol.variance & sum(difference.frailty) < control$tol.frailty) break
} #end for (em in 1:control$niter.EM)
###########################################################################################################
###########################################################################################################
###########################################################################################################
###########################################################################################################
#######################################End of EM Algorithm#################################################
###########################################################################################################
###########################################################################################################
###########################################################################################################
###########################################################################################################
#save the last value of AIC, df and log.lik.margin from the EM algorithm as an optimal value, given the mixture penalty from penalty sequence
aic.vector<-c(aic.vector,aic.and.log.lik.and.df.EM[[seq(penalty.sequence)[penalty.sequence==penalty.cc]]][em,1])
degree.freedom.weights.vector<-c(degree.freedom.weights.vector,aic.and.log.lik.and.df.EM[[seq(penalty.sequence)[penalty.sequence==penalty.cc]]][em,2])
log.lik.margin.vector<-c(log.lik.margin.vector,aic.and.log.lik.and.df.EM[[seq(penalty.sequence)[penalty.sequence==penalty.cc]]][em,3])
#penalty(=1/lambda)
penalty.t<-list()
for (l in 1:L)
{
penalty.t[[l]]<-1/variance.penalty.t[[l]]
names(penalty.t[[l]])[1]<-paste("penalty.t.Baseline.Risk.",l,sep="")
if (p > 0) names(penalty.t[[l]])[-1]<-paste(paste("penalty.t.",colnames(Design.variables[[l]])[2:ncol(Design.variables[[l]])],sep=""),".Risk.",l,sep="")
}
#print of resulting estimates: theta and penalty
if (control$print.estimates)
{
cat("\n\n","resulting estimates: beta and penalties","\n\n")
print(unlist(beta.t.intercept))
print(unlist(beta.t.slope))
cat("","\n\n")
print(unlist(penalty.t))
}
###############################################################################
###############################################################################
#calculate variances of the resulting estimates for theta (cp. Louis,1982)#####
###############################################################################
###############################################################################
co.variance.teta<-list()
variance.teta<-list()
offset.frailty<-lapply(1:L,FUN=function(l) unlist(offset.list[[l]]) + rep(log(unlist(w[[l]])),unlist(help.lapply[[l]])))
for (l in 1:L)
{
#complete information, already calculated
fisher.complete<-crossprod(Design.matrix.t.list[[l]],Design.matrix.t.list[[l]]*as.vector(exp(Design.matrix.t.list[[l]]%*%teta.t[[l]]+unlist(offset.frailty[[l]]))))
#(conditional) variance of frailty w, given poisson data delta.l
variance.w<-unlist(lapply(seq(ID.unique),FUN=function(i) sum(unlist(c.l.ml.i[[i]][[l]])*unique(alpha.l.ml.given.delta.i[[i]][,l])/unique(beta.l.ml.given.delta.i[[i]][,l]^2))))
variance.w<-rep(variance.w,unlist(help.lapply[[l]])) #inflated, according to dimension of the Design.matrix
#missing information
vector.diag<-sqrt(variance.w)*as.vector(exp(Design.matrix.t.list[[l]]%*%teta.t[[l]]+unlist(offset.list[[l]])))
fisher.missing<-crossprod(vector.diag*Design.matrix.t.list[[l]])
fisher.observed<-fisher.complete-fisher.missing
fisher.observed.pen<-fisher.observed+diag(c(diag(Lambda.t[[l]]))) #penalized observed fisher type matrix
#sandwich estimator of covariance matrix of theta
co.variance.teta[[l]]<-qr.solve(fisher.observed.pen,tol=1e-100)%*%fisher.observed%*%qr.solve(fisher.observed.pen,tol=1e-100)
variance.teta[[l]]<-diag(co.variance.teta[[l]])
names(variance.teta[[l]])<-c(names(teta[[l]]))
}
###################################
###Grid for time###################
###################################
length.grid<-1000
grid.t.list<-list()
B.grid.t.list<-list()
for (l in 1:L)
{
grid.t.list[[l]]<-seq(min(Z.time[[l]]),max(Z.time[[l]]),le=length.grid)
B.grid.t.list[[l]]<-outer(grid.t.list[[l]],knots.t[[l]],FUN="-")
B.grid.t.list[[l]]<-B.grid.t.list[[l]]*(B.grid.t.list[[l]]>0)
}
#############################
#Confidence Bands############
#############################
deviation.t<-list() #list of L elements, each element is a list of p+1 deviations for: baseline, covariate.1,...,covariate.p
C.t.grid<-list()
variance.t.Baseline<-list()
deviation.t.Baseline<-list()
for (l in 1:L)
{
deviation.t.matrix<-matrix(0,nrow=length(grid.t.list[[l]]),ncol=p+1)
colnames(deviation.t.matrix)<-paste("deviation.",colnames(Design.variables[[l]]),sep="")
C.t.grid[[l]]<-cbind(1,grid.t.list[[l]],B.grid.t.list[[l]])
#Baseline
co.variance.teta.Baseline<-co.variance.teta[[l]][1:(2+K.t[[l]]),1:(2+K.t[[l]])]
variance.t.Baseline[[l]]<-apply(C.t.grid[[l]],1,FUN=function(help.row) t(help.row)%*%co.variance.teta.Baseline%*%help.row)
deviation.t.Baseline[[l]]<-qnorm(0.975)*sqrt(variance.t.Baseline[[l]]) #conf.level alpha=0.05
deviation.t.matrix[,1]<-deviation.t.Baseline[[l]]
#covariates
if (p > 0) for (k in 1:p)
{
assign(paste("co.variance.teta.",colnames(Design.variables[[l]])[k+1],sep=""),co.variance.teta[[l]][(2+K.t[[l]]+2*k-1+(k-1)*K.t[[l]]):(2+K.t[[l]]+2*k+k*K.t[[l]]),(2+K.t[[l]]+2*k-1+(k-1)*K.t[[l]]):(2+K.t[[l]]+2*k+k*K.t[[l]])])
assign(paste("variance.t.",colnames(Design.variables[[l]])[k+1],sep=""),apply(C.t.grid[[l]],1,FUN=function(help.row) t(help.row)%*%get(paste("co.variance.teta.",colnames(Design.variables[[l]])[k+1],sep=""))%*%help.row))
assign(paste("deviation.t.",colnames(Design.variables[[l]])[k+1],sep=""),qnorm(0.975)*sqrt(get(paste("variance.t.",colnames(Design.variables[[l]])[k+1],sep=""))))
deviation.t.matrix[,k+1]<-get(paste("deviation.t.",colnames(Design.variables[[l]])[k+1],sep=""))
}
deviation.t[[l]]<-deviation.t.matrix
} #end for(l in 1:L)
######################################################################
#varying coefficients for Baseline and covariates#####################
######################################################################
for (l in 1:L) assign(paste("estimate.beta.t.risk.",l,sep=""),eval(parse(text=paste("c(beta.t.intercept[[",l,"]],beta.t.slope[[",l,"]])",sep=""))))
#varying coefficients with values on the grid
for (l in 1:L) assign(paste("alpha.t.risk.",l,sep=""),vector(mode="list",length=p+1))
#Baseline
for (l in 1:L)
{
help.vector<-as.vector(cbind(1,grid.t.list[[l]])%*%get(paste("estimate.beta.t.risk.",l,sep=""))[grep("Baseline",names(get(paste("estimate.beta.t.risk.",l,sep=""))),fixed=TRUE)]+B.grid.t.list[[l]]%*%u.t[[l]][grep("Baseline",names(u.t[[l]]),fixed=TRUE)])
eval(parse(text=paste("alpha.t.risk.",l,"[[1]]","<-","help.vector",sep="")))
}
#covariates
if (p > 0)
for (l in 1:L)
{
for (k in 1:p)
{
help.vector<-as.vector(cbind(1,grid.t.list[[l]])%*%c(beta.t.intercept[[l]][grep(paste("beta.t.intercept.",colnames(Design.variables[[l]])[k+1],sep=""),names(beta.t.intercept[[l]]),fixed=TRUE)],beta.t.slope[[l]][grep(paste("beta.t.slope.",colnames(Design.variables[[l]])[k+1],sep=""),names(beta.t.slope[[l]]),fixed=TRUE)])+B.grid.t.list[[l]]%*%u.t[[l]][grep(paste("u.t.",colnames(Design.variables[[l]])[k+1],sep=""),names(u.t[[l]]),fixed=TRUE)])
eval(parse(text=paste("alpha.t.risk.",l,"[[",k+1,"]]","<-","help.vector",sep="")))
}
}
########################################
#resulting estimates of frailties#######
########################################
frailty.estimate<-w.list[[em]]
##########################################################################
#resulting estimates of fixed and random components of varying.coef#######
#as well as penalty values for their random parts#########################
##########################################################################
estimates.fix[[seq(penalty.sequence)[penalty.sequence==penalty.cc]]]<-c(unlist(beta.t.intercept),unlist(beta.t.slope))
estimates.random[[seq(penalty.sequence)[penalty.sequence==penalty.cc]]]<-u.t
penalty.t.list[[seq(penalty.sequence)[penalty.sequence==penalty.cc]]]<-unlist(penalty.t)
##############################################################################################
#fill out the list components according to the penalty value of mixture weights###############
##############################################################################################
cc.list[[seq(penalty.sequence)[penalty.sequence==penalty.cc]]]<-cc
for (l in 1:L) eval(parse(text=paste("alpha.t.risk.",l,".list","[[",seq(penalty.sequence)[penalty.sequence==penalty.cc],"]]","<-","alpha.t.risk.",l,sep="")))
deviation.t.list[[seq(penalty.sequence)[penalty.sequence==penalty.cc]]]<-deviation.t
frailty.estimate.list[[seq(penalty.sequence)[penalty.sequence==penalty.cc]]]<-frailty.estimate
}
#####################################################
#####################################################
#end for loop (penalty.cc in penalty.sequence)#######
#####################################################
#####################################################
####################################################################
#define the optimal value of penalty of mixture weights#############
####################################################################
penalty.cc.optim<-penalty.sequence[aic.vector==min(aic.vector)]
position.optim<-seq(penalty.sequence)[penalty.sequence==penalty.cc.optim]
if (control$print.estimates) cat("\n\n\n","optimal penalty of mixture weights =",penalty.cc.optim,"\n\n\n")
################################################################
#define according optimial values of model components###########
################################################################
#optimal mixture weights
cc.optim<-cc.list[[position.optim]]
#deviation as a list with data frames (w.r.t. to each risk) as elements of it
deviation.optim<-deviation.t.list[[position.optim]]
names(deviation.optim)<-paste("deviation.Risk.",1:L,sep="")
#optimal frailties
frailty.estimate.optim<-frailty.estimate.list[[position.optim]]
frailty.list<-list()
for (l in 1:L) frailty.list[[l]]<-unlist(frailty.estimate.optim[[l]])
names(frailty.list)<-paste("frailty.Risk.",1:L,sep="")
#optimal AIC,df.weights andlog.lik.margin values
aic.optim<-aic.vector[aic.vector==min(aic.vector)]
degree.freedom.weights.optim<-degree.freedom.weights.vector[aic.vector==min(aic.vector)]
log.lik.margin.optim<-log.lik.margin.vector[aic.vector==min(aic.vector)]
#varying coefficients as a list with data frames (w.r.t. to each risk) as elements of it
varying.list<-list()
for (l in 1:L)
{
assign(paste("alpha.t.risk.",l,".optim",sep=""),data.frame(get(paste("alpha.t.risk.",l,".list",sep=""))[[position.optim]]))
if (p > 0) var.coef.names<-c(paste("Baseline.Risk.",l,sep=""),paste(colnames(Design.variables[[l]])[2:ncol(Design.variables[[l]])],".Risk.",l,sep=""))
else var.coef.names<-c(paste("Baseline.Risk.",l,sep=""))
eval(parse(text=paste("names(alpha.t.risk.",l,".optim",")","<-var.coef.names",sep="")))
varying.list[[l]]<-get(paste("alpha.t.risk.",l,".optim",sep=""))
}
#grid for plotting
grid.frame<-data.frame(grid.t.list)
names(grid.frame)<-paste("grid.Risk.",1:L,sep="")
#optimal values of fixed and random components of varying coefficients as well as penalties for their random parts
fixed.coef.optim<-estimates.fix[[position.optim]]
random.coef.optim<-estimates.random[[position.optim]]
penalty.varying.optim<-penalty.t.list[[position.optim]]
#optimal values of degrees.of.freedom for varying coefficients
#df.varying.optim<-df.varying[[position.optim]]
if (p > 0) factor.names<-colnames(Design.variables[[1]])[2:length(colnames(Design.variables[[1]]))] else factor.names<-character(0)
#################################################################
#give the supplemented names to the Risks########################
#################################################################
#fixed.coef (beta)
names(fixed.coef.optim)<-paste(sapply(1:length(fixed.coef.optim),FUN=function(i) strsplit(names(fixed.coef.optim),".Risk.")[[i]][1]),rep(risk.names,rep(p+1,L),sep="."))
#random.coef (u)
for (l in 1:L) names(random.coef.optim[[l]])<-paste(sapply(1:length(random.coef.optim[[l]]),FUN=function(i) strsplit(names(random.coef.optim[[l]]),".Risk.")[[i]][1]),risk.names[l],sep=".")
#penalty.varying.optim (lambda)
for (l in 1:L) names(penalty.varying.optim)[(1+(l-1)*(p+1)):(l*(p+1))]<-paste(sapply(1:(p+1),FUN=function(i) strsplit(names(penalty.varying.optim)[(1+(l-1)*(p+1)):(l*(p+1))],".Risk.")[[i]][1]),risk.names[l],sep=".")
#grid.frame
names(grid.frame)<-paste(sapply(1:length(grid.frame),FUN=function(i) strsplit(names(grid.frame),".Risk.")[[i]][1]),risk.names,sep=".")
#varying.list
for (l in 1:L) colnames(varying.list[[l]])<-paste(sapply(1:length(varying.list[[l]][1,]),FUN=function(i) strsplit(colnames(varying.list[[l]]),".Risk.")[[i]][1]),risk.names[l],sep=".")
#deviation.list
names(deviation.optim)<-paste(sapply(1:length(deviation.optim),FUN=function(i) strsplit(names(deviation.optim),".Risk.")[[i]][1]),risk.names,sep=".")
for (l in 1:L) colnames(deviation.optim[[l]])<-paste(colnames(deviation.optim[[l]]),risk.names[l],sep=".")
#frailty.list
names(frailty.list)<-paste(sapply(1:length(frailty.list),FUN=function(i) strsplit(names(frailty.list),".Risk.")[[i]][1]),risk.names,sep=".")
list(L=L,M.list=M.list,fixed.coef.optim=fixed.coef.optim, random.coef.optim=random.coef.optim, penalty.varying.optim=penalty.varying.optim, penalty.weights.optim=penalty.cc.optim, grid.frame=grid.frame, varying.list=varying.list, deviation.list=deviation.optim, frailty.list=frailty.list, mixture.weights=cc.optim, aic.optim=aic.optim, df.weights.optim=degree.freedom.weights.optim, log.lik.margin.optim=log.lik.margin.optim, p=p, factor.names=factor.names, risk.names=risk.names)
#aic.vector=aic.vector,df.weights.vector=degree.freedom.weights.vector,log.lik.margin.vector=log.lik.margin.vector
} #end of function CompetingRiskFrailtyOptim(...)
|
# server.R
# RestRserve scoring endpoint for the MARS model (AWS SageMaker layout):
#   GET  /ping         -> liveness probe
#   POST /invocations  -> predictions for a JSON or CSV payload
library(RestRserve)
library(data.table)
# `prefix` is supplied by the hosting environment; loading model.RData
# restores the fitted `model` object into the global environment.
model_dir <- file.path(prefix, "model")
load(file.path(model_dir, "model.RData"))
# Register "text/csv" with the plain-text encoder/decoder so the app
# accepts content_type "text/csv", the default used by AWS SageMaker's
# CSVSerializer class.
codec_mw <- EncodeDecodeMiddleware$new()
codec_mw$ContentHandlers$set_encode(
  "text/csv",
  codec_mw$ContentHandlers$get_encode("text/plain")
)
codec_mw$ContentHandlers$set_decode(
  "text/csv",
  codec_mw$ContentHandlers$get_decode("text/plain")
)
app <- Application$new(middleware = list())
# Health-check route.
app$add_get(
  "/ping",
  FUN = function(req, res) {
    res$set_body("R6sagemaker mars restrserve example")
  }
)
# Scoring route. Supported content types:
#   * "application/json" -> coerced straight to a data.table
#   * anything else (e.g. "text/csv", "text/plain") -> parsed with fread()
app$add_post(
  "/invocations",
  FUN = function(req, res) {
    payload <- switch(req$content_type,
      "application/json" = as.data.table(req$body),
      fread(req$body))
    scores <- predict(model, payload, row.names = FALSE)
    res$set_body(scores)
  }
)
app$append_middleware(codec_mw)
| /examples/hyperparameter_tuning/r_bring_your_own/mars-restrserve/server.R | permissive | OwenGarrity/sagemaker-r-sdk | R | false | false | 1,479 | r | # server.R
# RestRserve scoring endpoint for the MARS model (AWS SageMaker layout):
#   GET  /ping         -> liveness probe
#   POST /invocations  -> predictions for a JSON or CSV payload
library(RestRserve)
library(data.table)
# `prefix` is supplied by the hosting environment; loading model.RData
# restores the fitted `model` object into the global environment.
model_dir <- file.path(prefix, "model")
load(file.path(model_dir, "model.RData"))
# Register "text/csv" with the plain-text encoder/decoder so the app
# accepts content_type "text/csv", the default used by AWS SageMaker's
# CSVSerializer class.
codec_mw <- EncodeDecodeMiddleware$new()
codec_mw$ContentHandlers$set_encode(
  "text/csv",
  codec_mw$ContentHandlers$get_encode("text/plain")
)
codec_mw$ContentHandlers$set_decode(
  "text/csv",
  codec_mw$ContentHandlers$get_decode("text/plain")
)
app <- Application$new(middleware = list())
# Health-check route.
app$add_get(
  "/ping",
  FUN = function(req, res) {
    res$set_body("R6sagemaker mars restrserve example")
  }
)
# Scoring route. Supported content types:
#   * "application/json" -> coerced straight to a data.table
#   * anything else (e.g. "text/csv", "text/plain") -> parsed with fread()
app$add_post(
  "/invocations",
  FUN = function(req, res) {
    payload <- switch(req$content_type,
      "application/json" = as.data.table(req$body),
      fread(req$body))
    scores <- predict(model, payload, row.names = FALSE)
    res$set_body(scores)
  }
)
app$append_middleware(codec_mw)
|
# Fuse a learner with PCA preprocessing of its numeric features.
#
# During training the numeric feature columns (never the target) are
# centered, scaled and rotated with stats::prcomp; the resulting
# center/scale/rotation is stored in the control object and re-applied
# to new data at prediction time. Non-numeric columns pass through
# unchanged.
#
# @param learner [Learner] The base learner to wrap.
# @return [Learner] The wrapped learner produced by makePreprocWrapper.
makePreprocWrapperPCA = function(learner) {
  assertClass(learner, classes = "Learner")
  trainfun = function(data, target, args) {
    cns = colnames(data)
    # numeric feature columns; the target is excluded from the rotation
    nums = setdiff(cns[sapply(data, is.numeric)], target)
    # no numeric features: nothing to rotate, empty control signals a no-op
    if (!length(nums))
      return(list(data = data, control = list()))
    # drop = FALSE keeps a one-column data.frame (and its column name) when
    # there is a single numeric feature, mirroring predictfun below
    x = data[, nums, drop = FALSE]
    pca = prcomp(x, scale = TRUE)
    # replace the raw numeric columns with their principal-component scores
    data = data[, setdiff(cns, nums), drop = FALSE]
    data = cbind(data, as.data.frame(pca$x))
    # everything needed to reproduce the projection on new data
    ctrl = list(center = pca$center, scale = pca$scale, rotation = pca$rotation)
    return(list(data = data, control = ctrl))
  }
  predictfun = function(data, target, args, control) {
    # empty control means training saw no numeric features
    if (!length(control))
      return(data)
    cns = colnames(data)
    nums = cns[sapply(data, is.numeric)]
    x = as.matrix(data[, nums, drop = FALSE])
    # apply the training center/scale, then project onto the loadings
    x = scale(x, center = control$center, scale = control$scale)
    x = x %*% control$rotation
    data2 = data[, setdiff(cns, nums), drop = FALSE]
    data2 = cbind(data2, as.data.frame(x))
    return(data2)
  }
  makePreprocWrapper(learner, trainfun, predictfun)
}
| /R/PreprocWrapperPCA.R | no_license | Daron-Wan/mlr | R | false | false | 1,102 | r | #FIXME: read this
# Fuse a learner with PCA preprocessing of its numeric features.
#
# During training the numeric feature columns (never the target) are
# centered, scaled and rotated with stats::prcomp; the resulting
# center/scale/rotation is stored in the control object and re-applied
# to new data at prediction time. Non-numeric columns pass through
# unchanged.
#
# @param learner [Learner] The base learner to wrap.
# @return [Learner] The wrapped learner produced by makePreprocWrapper.
makePreprocWrapperPCA = function(learner) {
  assertClass(learner, classes = "Learner")
  trainfun = function(data, target, args) {
    cns = colnames(data)
    # numeric feature columns; the target is excluded from the rotation
    nums = setdiff(cns[sapply(data, is.numeric)], target)
    # no numeric features: nothing to rotate, empty control signals a no-op
    if (!length(nums))
      return(list(data = data, control = list()))
    # drop = FALSE keeps a one-column data.frame (and its column name) when
    # there is a single numeric feature, mirroring predictfun below
    x = data[, nums, drop = FALSE]
    pca = prcomp(x, scale = TRUE)
    # replace the raw numeric columns with their principal-component scores
    data = data[, setdiff(cns, nums), drop = FALSE]
    data = cbind(data, as.data.frame(pca$x))
    # everything needed to reproduce the projection on new data
    ctrl = list(center = pca$center, scale = pca$scale, rotation = pca$rotation)
    return(list(data = data, control = ctrl))
  }
  predictfun = function(data, target, args, control) {
    # empty control means training saw no numeric features
    if (!length(control))
      return(data)
    cns = colnames(data)
    nums = cns[sapply(data, is.numeric)]
    x = as.matrix(data[, nums, drop = FALSE])
    # apply the training center/scale, then project onto the loadings
    x = scale(x, center = control$center, scale = control$scale)
    x = x %*% control$rotation
    data2 = data[, setdiff(cns, nums), drop = FALSE]
    data2 = cbind(data2, as.data.frame(x))
    return(data2)
  }
  makePreprocWrapper(learner, trainfun, predictfun)
}
|
# clean_strings(): normalise messy strings by re-encoding them, stripping
# surrounding whitespace and collapsing internal whitespace runs.
#
# Author: Miguel Alvarez
################################################################################
# Generic method
setGeneric("clean_strings",
    function(x, ...)
      standardGeneric("clean_strings"))

# Character vectors: re-encode, trim both ends, squeeze whitespace runs
# down to single spaces.
setMethod("clean_strings", signature(x = "character"),
    function(x, from = "utf8", to = "utf8") {
      gsub("\\s+", " ", trimws(iconv(x, from, to), which = "both"))
    }
)

# Factors: clean the level labels, leaving the underlying codes untouched.
setMethod("clean_strings", signature(x = "factor"),
    function(x, from = "utf8", to = "utf8") {
      base::levels(x) <- clean_strings(base::levels(x), from, to)
      x
    }
)

# Data frames: clean every character or factor column in place.
setMethod("clean_strings", signature(x = "data.frame"),
    function(x, from = "utf8", to = "utf8") {
      for (column in colnames(x)) {
        if (is.character(x[[column]]) || is.factor(x[[column]]))
          x[[column]] <- clean_strings(x[[column]], from, to)
      }
      x
    }
)
| /R/clean_strings.R | no_license | heibl/taxlist | R | false | false | 966 | r | # TODO: Method cleaning character strings
# clean_strings(): normalise messy strings by re-encoding them, stripping
# surrounding whitespace and collapsing internal whitespace runs.
#
# Author: Miguel Alvarez
################################################################################
# Generic method
setGeneric("clean_strings",
    function(x, ...)
      standardGeneric("clean_strings"))

# Character vectors: re-encode, trim both ends, squeeze whitespace runs
# down to single spaces.
setMethod("clean_strings", signature(x = "character"),
    function(x, from = "utf8", to = "utf8") {
      gsub("\\s+", " ", trimws(iconv(x, from, to), which = "both"))
    }
)

# Factors: clean the level labels, leaving the underlying codes untouched.
setMethod("clean_strings", signature(x = "factor"),
    function(x, from = "utf8", to = "utf8") {
      base::levels(x) <- clean_strings(base::levels(x), from, to)
      x
    }
)

# Data frames: clean every character or factor column in place.
setMethod("clean_strings", signature(x = "data.frame"),
    function(x, from = "utf8", to = "utf8") {
      for (column in colnames(x)) {
        if (is.character(x[[column]]) || is.factor(x[[column]]))
          x[[column]] <- clean_strings(x[[column]], from, to)
      }
      x
    }
)
|
# Cyclistic dashboard: load packages and data, then define the Shiny server.
library(librarian)
shelf(dplyr, ggplot2, plotly, leaflet, shiny)
# Fixed seed so the map's random sample of rows is reproducible.
set.seed(69)
# Ride data prepared elsewhere; columns referenced below: ride_length,
# distance, day, rideable_type, member_casual (plus coordinates for leaflet).
mydata <- readRDS(file = "data.rds")
# Shiny server function (the value of this script, shinyApp-style).
function(input, output, session) {
#### Plot Output ####
# Histogram of the user-chosen continuous variable — ride length in seconds
# ("length") or trip distance in metres (per the axis labels) — filled by
# member status.
output$plot1 <- renderPlotly(
if(input$variable_cont == "length"){
ggplotly(ggplot(mydata) +
geom_histogram(aes(x=ride_length, fill = member_casual)) +
xlim(0, 10000) +
labs(fill = "Member Status?") +
xlab("Length of Time (s)") +
ggtitle(paste("Distribution of","Length of Time (s)"," by Member Status")) +
theme_bw())
}
else{
ggplotly(ggplot(mydata) +
geom_histogram(aes(x=distance, fill = member_casual)) +
xlim(0, 10000) +
labs(fill = "Member Status?") +
xlab("Distance (m)") +
ggtitle(paste("Distribution of ","Distance (m)","by Member Status")) +
theme_bw())
}
)
# Dodged bar chart of the user-chosen categorical variable:
# weekday usage or rideable type, split by member status.
output$plot2 <- renderPlotly(
if(input$variable_cat== "weekday"){
ggplotly(ggplot(mydata) +
geom_bar(aes(x=day, fill = member_casual), position = "dodge") +
labs(fill = "Member Status?") +
xlab("Day of the Week") +
ggtitle(paste("Distribution of","Weekday Usage"," by Member Status")) +
theme_bw())
}
else{
ggplotly(ggplot(mydata) +
geom_bar(aes(x=rideable_type, fill = member_casual), position = "dodge") +
labs(fill = "Member Status?") +
xlab("Rideable Type") +
ggtitle(paste("Distribution of","Rideable Type"," by Member Status")) +
theme_bw())
}
)
#### Map Output ####
# One colour per member status level (red/blue).
pal <- colorFactor(palette = c("red", "blue"), domain = mydata$member_casual)
# Clustered marker map of a random sample of rides; sample size and
# cluster radius are both user-controlled inputs.
output$mymap <- leaflet::renderLeaflet(
mydata %>%
sample_n(size = input$count) %>%
leaflet() %>%
addTiles() %>%
addCircleMarkers(color = ~pal(member_casual),
clusterOptions = markerClusterOptions(maxClusterRadius = input$radius)) %>%
addLegend(values = ~member_casual,
pal = pal,
opacity = 1,
title = "Member or Casual User?")
)
} | /Cyclistic Shiny/Cyclistic_shiny/server.R | no_license | coreyneff/Data_products | R | false | false | 2,232 | r | library(librarian)
# Cyclistic dashboard: load packages and data, then define the Shiny server.
shelf(dplyr, ggplot2, plotly, leaflet, shiny)
# Fixed seed so the map's random sample of rows is reproducible.
set.seed(69)
# Ride data prepared elsewhere; columns referenced below: ride_length,
# distance, day, rideable_type, member_casual (plus coordinates for leaflet).
mydata <- readRDS(file = "data.rds")
# Shiny server function (the value of this script, shinyApp-style).
function(input, output, session) {
#### Plot Output ####
# Histogram of the user-chosen continuous variable — ride length in seconds
# ("length") or trip distance in metres (per the axis labels) — filled by
# member status.
output$plot1 <- renderPlotly(
if(input$variable_cont == "length"){
ggplotly(ggplot(mydata) +
geom_histogram(aes(x=ride_length, fill = member_casual)) +
xlim(0, 10000) +
labs(fill = "Member Status?") +
xlab("Length of Time (s)") +
ggtitle(paste("Distribution of","Length of Time (s)"," by Member Status")) +
theme_bw())
}
else{
ggplotly(ggplot(mydata) +
geom_histogram(aes(x=distance, fill = member_casual)) +
xlim(0, 10000) +
labs(fill = "Member Status?") +
xlab("Distance (m)") +
ggtitle(paste("Distribution of ","Distance (m)","by Member Status")) +
theme_bw())
}
)
# Dodged bar chart of the user-chosen categorical variable:
# weekday usage or rideable type, split by member status.
output$plot2 <- renderPlotly(
if(input$variable_cat== "weekday"){
ggplotly(ggplot(mydata) +
geom_bar(aes(x=day, fill = member_casual), position = "dodge") +
labs(fill = "Member Status?") +
xlab("Day of the Week") +
ggtitle(paste("Distribution of","Weekday Usage"," by Member Status")) +
theme_bw())
}
else{
ggplotly(ggplot(mydata) +
geom_bar(aes(x=rideable_type, fill = member_casual), position = "dodge") +
labs(fill = "Member Status?") +
xlab("Rideable Type") +
ggtitle(paste("Distribution of","Rideable Type"," by Member Status")) +
theme_bw())
}
)
#### Map Output ####
# One colour per member status level (red/blue).
pal <- colorFactor(palette = c("red", "blue"), domain = mydata$member_casual)
# Clustered marker map of a random sample of rides; sample size and
# cluster radius are both user-controlled inputs.
output$mymap <- leaflet::renderLeaflet(
mydata %>%
sample_n(size = input$count) %>%
leaflet() %>%
addTiles() %>%
addCircleMarkers(color = ~pal(member_casual),
clusterOptions = markerClusterOptions(maxClusterRadius = input$radius)) %>%
addLegend(values = ~member_casual,
pal = pal,
opacity = 1,
title = "Member or Casual User?")
)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crop_sensitive.R
\name{crop_sensitive}
\alias{crop_sensitive}
\alias{crop_sensitive.default}
\alias{crop_sensitive.data.frame}
\alias{crop_sensitive.array}
\alias{crop_sensitive.sf}
\title{Crop sensitive indices}
\usage{
crop_sensitive(object, ...)
\method{crop_sensitive}{default}(object, tmin, ...)
\method{crop_sensitive}{data.frame}(object, day.one, ...)
\method{crop_sensitive}{array}(object, day.one, ...)
\method{crop_sensitive}{sf}(object, day.one, ..., as.sf = TRUE)
}
\arguments{
\item{object}{a numeric vector with the maximum temperature,
or a data.frame with geographical coordinates (lonlat),
or an object of class \code{sf} with geometry 'POINT' or 'POLYGON',
or an \code{array} with two dimensions containing the
maximum and minimum temperature, in that order. See details}
\item{...}{additional arguments passed to methods. See details}
\item{tmin}{a numeric vector with the minimum temperature}
\item{day.one}{a vector of class \code{Date} or any other object that can be
coerced to \code{Date} (e.g. integer, character YYYY-MM-DD) for the starting
day to capture the climate data}
\item{as.sf}{logical, to return an object of class 'sf'}
}
\value{
A data frame with crop sensitive indices, with n columns depending on the
number of thresholds passed to each index:
\item{hts_mean}{high temperature stress using daily MEAN temperature,
and given as percentage number of days a certain threshold is exceeded}
\item{hts_max}{high temperature stress using daily MAX temperature,
and given as percentage number of days a certain threshold is exceeded}
\item{hse}{heat stress event, given as the percentage of days where
a certain threshold is exceeded for at least two consecutive days}
\item{hse_ms}{heat stress event, given as the maximum number of days where
a certain threshold is exceeded for at least two consecutive days}
\item{cdi_mean}{crop duration index using daily MEAN temperature,
and given as max(Tmean - threshold, 0)}
\item{cdi_max}{crop duration index using daily MAX temperature,
and given as max(Tmax - threshold, 0)}
\item{lethal}{lethal temperatures, defined as percentage of days during the
timeseries where daily MEAN temperature exceeds a given threshold}
}
\description{
Compute crop sensitive indices. These indices are designed to capture
the changes in temperature extremes during key phenological stages
(e.g. anthesis), but can also be applied to other phenological stages.
}
\details{
The function uses pre-defined thresholds to compute the indices. For hts_mean (32,
35, 38 Celsius), for hts_max (36, 39, 42 Celsius), for hse (31 Celsius), for
cdi_mean (22, 23, 24 Celsius), for cdi_max (27, 28, 29 Celsius) and for
lethal (43, 46, 49 Celsius).
Additional arguments:
The thresholds can be adjusted using the arguments \code{hts_mean.threshold},
\code{hts_max.threshold}, \code{hse.threshold}, \code{cdi_mean.threshold},
\code{cdi_max.threshold} and \code{lethal.threshold} which are a numeric (or
vector of numeric)
\code{last.day}: an object (optional to \var{span}) of class \code{Date} or
any other object that can be coerced to \code{Date} (e.g. integer, character
YYYY-MM-DD) for the last day of the time series. For \code{data.frame}, \code{array}
and \code{sf} methods
\code{span}: an integer (optional to \var{last.day}) or a vector with
integers (optional if \var{last.day} is given) for the length of
the time series to be captured. For \code{data.frame}, \code{array}
and \code{sf} methods
}
\examples{
# the default method
set.seed(78)
tmax <- runif(50, 37, 47)
set.seed(79)
tmin <- runif(50, 31, 34)
crop_sensitive(tmax, tmin)
###############################################
# the array method
data("temp_dat", package = "climatrends")
# use the default thresholds
crop_sensitive(temp_dat,
day.one = "2013-10-27",
last.day = "2013-11-04")
# or change the thresholds based on the crop physiology
crop_sensitive(temp_dat,
day.one = "2013-10-27",
last.day = "2013-11-04",
hts_mean.threshold = c(24),
hts_max.threshold = c(31, 33))
}
\references{
Challinor et al. (2016). Nature Climate Change 6(10):6954-958
\doi{https://doi.org/10.1038/nclimate3061}
Trnka et al. (2014). Nature Climate Change 4(7):637–43.
\doi{https://doi.org/10.1038/nclimate2242}
}
\seealso{
Other temperature functions:
\code{\link{ETo}()},
\code{\link{GDD}()},
\code{\link{temperature}()}
}
\concept{temperature functions}
| /man/crop_sensitive.Rd | permissive | AgrDataSci/climatrends | R | false | true | 4,583 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crop_sensitive.R
\name{crop_sensitive}
\alias{crop_sensitive}
\alias{crop_sensitive.default}
\alias{crop_sensitive.data.frame}
\alias{crop_sensitive.array}
\alias{crop_sensitive.sf}
\title{Crop sensitive indices}
\usage{
crop_sensitive(object, ...)
\method{crop_sensitive}{default}(object, tmin, ...)
\method{crop_sensitive}{data.frame}(object, day.one, ...)
\method{crop_sensitive}{array}(object, day.one, ...)
\method{crop_sensitive}{sf}(object, day.one, ..., as.sf = TRUE)
}
\arguments{
\item{object}{a numeric vector with the maximum temperature,
or a data.frame with geographical coordinates (lonlat),
or an object of class \code{sf} with geometry 'POINT' or 'POLYGON',
or an \code{array} with two dimensions containing the
maximum and minimum temperature, in that order. See details}
\item{...}{additional arguments passed to methods. See details}
\item{tmin}{a numeric vector with the minimum temperature}
\item{day.one}{a vector of class \code{Date} or any other object that can be
coerced to \code{Date} (e.g. integer, character YYYY-MM-DD) for the starting
day to capture the climate data}
\item{as.sf}{logical, to return an object of class 'sf'}
}
\value{
A dataframe with crop sensitive indices with n columns depending on the
number of thresholds passed to each index:
\item{hts_mean}{high temperature stress using daily MEAN temperature,
and given as percentage number of days a certain threshold is exceeded}
\item{hts_max}{high temperature stress using daily MAX temperature,
and given as percentage number of days a certain threshold is exceeded}
\item{hse}{heat stress event, and given as percentage number of days
a certain threshold is exceeded for at least two consecutive days}
\item{hse_ms}{heat stress event, and given the maximum number of days
a certain threshold is exceeded for at least two consecutive days}
\item{cdi_mean}{crop duration index using daily MEAN temperature,
and given as max(Tmean - threshold, 0)}
\item{cdi_max}{crop duration index using daily MAX temperature,
and given as max(Tmax - threshold, 0)}
\item{lethal}{lethal temperatures, defined as percentage of days during the
timeseries where daily MEAN temperature exceeds a given threshold}
}
\description{
Compute crop sensitive indices. These indices are designed to capture
the changes in temperature extremes during key phenological stages
(e.g. anthesis), but can also be applied to other phenological stages.
}
\details{
The function uses pre-defined threshold to compute the indices. For hts_mean (32,
35, 38 Celsius), for hts_max (36, 39, 42 Celsius), for hse (31 Celsius), for
cdi_mean (22, 23, 24 Celsius), for cdi_max (27, 28, 29 Celsius) and for
lethal (43, 46, 49 Celsius).
Additional arguments:
The thresholds can be adjusted using the arguments \code{hts_mean.threshold},
\code{hts_max.threshold}, \code{hse.threshold}, \code{cdi_mean.threshold},
\code{cdi_max.threshold} and \code{lethal.threshold} which are a numeric (or
vector of numeric)
\code{last.day}: an object (optional to \var{span}) of class \code{Date} or
any other object that can be coerced to \code{Date} (e.g. integer, character
YYYY-MM-DD) for the last day of the time series. For \code{data.frame}, \code{array}
and \code{sf} methods
\code{span}: an integer (optional to \var{last.day}) or a vector with
integers (optional if \var{last.day} is given) for the length of
the time series to be captured. For \code{data.frame}, \code{array}
and \code{sf} methods
}
\examples{
# the default method
set.seed(78)
tmax <- runif(50, 37, 47)
set.seed(79)
tmin <- runif(50, 31, 34)
crop_sensitive(tmax, tmin)
###############################################
# the array method
data("temp_dat", package = "climatrends")
# use the default thresholds
crop_sensitive(temp_dat,
day.one = "2013-10-27",
last.day = "2013-11-04")
# or change the thresholds based on the crop physiology
crop_sensitive(temp_dat,
day.one = "2013-10-27",
last.day = "2013-11-04",
hts_mean.threshold = c(24),
hts_max.threshold = c(31, 33))
}
\references{
Challinor et al. (2016). Nature Climate Change 6(10):6954-958
\doi{https://doi.org/10.1038/nclimate3061}
Trnka et al. (2014). Nature Climate Change 4(7):637–43.
\doi{https://doi.org/10.1038/nclimate2242}
}
\seealso{
Other temperature functions:
\code{\link{ETo}()},
\code{\link{GDD}()},
\code{\link{temperature}()}
}
\concept{temperature functions}
|
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(3.97314911878724e-307, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(1.46950620900256e+302, 4.11932782999429e-175, -3.85515401974544e+79, -3.02137085628715e+143, -1.07335709985308e+237, 1.20695523931594e-309, 3.32562378928678e+80, -1.04944149130577e-291, -6.66433666280476e+260, -1.24299680236504e+248, 9.70815500676051e+204, 4.46572320545082e-23, -1.13853964838196e+217, 95.7774360421032, 2.0018737059126e-28, -4636800105173434, 1.65447250389292e-256, -2.30374790479512e+88, 9.31444420548676e+294, 1.87140051912765e+293, 7.81174850164908e+153, -1.81388628605987e-210, 2.97417034753781e-112, 3.07889205700993e+72, -5.68358142431207e+115, -1.49905137588813e-296, -4.83607699504741e+296, -4.39048939437592e-283, 6.14411608709023e-73, -7.9700945594356e-175, -7.74871223767381e-132, 4.16882816770762e+216, 1.77638799941844e-103, 3.10673888773823e+67, 7.78963466942964e+235, -3.58131929196381e+99, -0.000144958566634, -1.97272183211855e+299, -4.80684530567003e-211, 1.27171785317634e+32, 7.27866839395753e-304, -4.03745792148629e+247, 6.98516021012687e+303, -1.47416531241142e-29, -9.26916759452804e-30, 2.80442413482245e+93, -3.49120966287497e+274, -1.64918989358022e+230, -6.65976989513026e-283, 4.42844269247337e-45, 1.98141864604823e-95, -2.80316332377215e+114, 3.39496965625457e+134, -1.15574798364676e+282, -4.86507829573234e+261, -1.12181685914956e-204, 4.83444858402713e-21, 4.44411230227823e-288, 1.74273204902173e-84, 3.6354008294539e-305), temp = c(1.4174931883648e-311, -9.27191279380401e-227, -3.30454338512553e-220, 0.00326457501838524, -4.11828281046168e-243, -1.95893925610339e-77, -7.57690586869615e+160, 1.77288451463919e+81, 7.30351788343351e+245, 1.14935825540514e+262, 9.09252021533702e-172, 1.65646662424464e-91, 2.77067322468006e+114, 6.44719590123194e+27, -1.82639555575468e-07, 
-4.2372858822964e-119, -1.19043356885614e+85, 3.31651557487312e-262, 1.82363221083299e-238, 4.35812421290471e+289, 1.11765367033464e-296))
result <- do.call(meteor:::ET0_PriestleyTaylor,testlist)
str(result) | /meteor/inst/testfiles/ET0_PriestleyTaylor/AFL_ET0_PriestleyTaylor/ET0_PriestleyTaylor_valgrind_files/1615844505-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 2,232 | r | testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(3.97314911878724e-307, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(1.46950620900256e+302, 4.11932782999429e-175, -3.85515401974544e+79, -3.02137085628715e+143, -1.07335709985308e+237, 1.20695523931594e-309, 3.32562378928678e+80, -1.04944149130577e-291, -6.66433666280476e+260, -1.24299680236504e+248, 9.70815500676051e+204, 4.46572320545082e-23, -1.13853964838196e+217, 95.7774360421032, 2.0018737059126e-28, -4636800105173434, 1.65447250389292e-256, -2.30374790479512e+88, 9.31444420548676e+294, 1.87140051912765e+293, 7.81174850164908e+153, -1.81388628605987e-210, 2.97417034753781e-112, 3.07889205700993e+72, -5.68358142431207e+115, -1.49905137588813e-296, -4.83607699504741e+296, -4.39048939437592e-283, 6.14411608709023e-73, -7.9700945594356e-175, -7.74871223767381e-132, 4.16882816770762e+216, 1.77638799941844e-103, 3.10673888773823e+67, 7.78963466942964e+235, -3.58131929196381e+99, -0.000144958566634, -1.97272183211855e+299, -4.80684530567003e-211, 1.27171785317634e+32, 7.27866839395753e-304, -4.03745792148629e+247, 6.98516021012687e+303, -1.47416531241142e-29, -9.26916759452804e-30, 2.80442413482245e+93, -3.49120966287497e+274, -1.64918989358022e+230, -6.65976989513026e-283, 4.42844269247337e-45, 1.98141864604823e-95, -2.80316332377215e+114, 3.39496965625457e+134, -1.15574798364676e+282, -4.86507829573234e+261, -1.12181685914956e-204, 4.83444858402713e-21, 4.44411230227823e-288, 1.74273204902173e-84, 3.6354008294539e-305), temp = c(1.4174931883648e-311, -9.27191279380401e-227, -3.30454338512553e-220, 0.00326457501838524, -4.11828281046168e-243, 
-1.95893925610339e-77, -7.57690586869615e+160, 1.77288451463919e+81, 7.30351788343351e+245, 1.14935825540514e+262, 9.09252021533702e-172, 1.65646662424464e-91, 2.77067322468006e+114, 6.44719590123194e+27, -1.82639555575468e-07, -4.2372858822964e-119, -1.19043356885614e+85, 3.31651557487312e-262, 1.82363221083299e-238, 4.35812421290471e+289, 1.11765367033464e-296))
result <- do.call(meteor:::ET0_PriestleyTaylor,testlist)
str(result) |
# Test plot_karl function
# March 2018
## Packages
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing, so a missing dependency surfaces later as "could not find function".
# library() would fail fast here; left as-is in this documentation-only pass.
require(testthat)
require(matlib)
require(ggplot2)
require(gridExtra)
# Generate small data to test our function
set.seed(4)
X <- data.frame('X1' = rnorm(10))
y <- X$X1 + rnorm(10)
# True value of the coefficients
# Closed-form simple-OLS estimates: slope = cov(x, y) / var(x) and
# intercept = mean(y) - slope * mean(x); `fit` and `res` are the implied
# fitted values and residuals (computed for reference, not asserted below).
beta <- cov(X$X1, y)/var(X$X1)
alpha <- mean(y) - beta*mean(X$X1)
fit <- alpha + beta*X$X1
res <- y - fit
# Fit a linear regression on the data
# LinearRegression() and plot_karl() come from the Karl package under test;
# `model` is expected to be a list with weights / fitted / residuals entries.
model <- LinearRegression(X, y)
# Plot Linear Model Diagnostics
plots <- plot_karl(model)
test_that("plot_karl(): returns various plots using the linear model object", {
# expected inputs:
expect_equal(is.null(model$residuals), FALSE) # Expect not null input
expect_match(typeof(model), 'list') # Expect type list
expect_equal(names(model), c('weights', 'fitted', 'residuals')) # Expect names of inputs
expect_equal(length(model$fitted), length(model$residuals)) # Length of residuals and fitted values to match
expect_true(length(model$fitted)>1) # Expect length of fitted values greater than 1
expect_true(length(model$residuals)>1) # Expect length of residuals values greater than 1
# expected outputs:
expect_match(typeof(plots), "list") # Checks to see if the plotting type is correct
expect_equal(length(plots), 2) # Checks to see if the number of outputs is correct.
expect_false(plots$respect) # `respect` is the gridExtra/gtable layout flag; expected FALSE
})
| /tests/testthat/test_plot_karl.R | permissive | UBC-MDS/Karl | R | false | false | 1,385 | r | # Test plot_karl function
# March 2018
## Packages
require(testthat)
require(matlib)
require(ggplot2)
require(gridExtra)
# Generate small data to test our function
set.seed(4)
X <- data.frame('X1' = rnorm(10))
y <- X$X1 + rnorm(10)
# True value of the coefficients
beta <- cov(X$X1, y)/var(X$X1)
alpha <- mean(y) - beta*mean(X$X1)
fit <- alpha + beta*X$X1
res <- y - fit
# Fit a linear regression on the data
model <- LinearRegression(X, y)
# Plot Linear Model Diagnostics
plots <- plot_karl(model)
test_that("plot_karl(): returns various plots using the linear model object", {
# expected inputs:
expect_equal(is.null(model$residuals), FALSE) # Expect not null input
expect_match(typeof(model), 'list') # Expect type list
expect_equal(names(model), c('weights', 'fitted', 'residuals')) # Expect names of inputs
expect_equal(length(model$fitted), length(model$residuals)) # Length of residuals and fitted values to match
expect_true(length(model$fitted)>1) # Expect length of fitted values greater than 1
expect_true(length(model$residuals)>1) # Expect length of residuals values greater than 1
# expected outputs:
expect_match(typeof(plots), "list") # Checks to see if the plotting type is correct
expect_equal(length(plots), 2) # Checks to see if the number of outputs is correct.
expect_false(plots$respect) # Respective to eachother the outputs.
})
|
## Rank hospitals in every state for a given 30-day mortality outcome.
##
## outcome: one of "heart attack", "heart failure", "pneumonia"
##          (anything else stops with "invalid outcome").
## num:     the rank to report per state -- "best", "worst", or a positive
##          integer. A state with fewer hospitals than `num` yields NA.
##
## Reads "outcome-of-care-measures.csv" from the working directory and
## returns a data frame with columns `hospital` and `state` (abbreviated
## state name), one row per state, ordered alphabetically by state.
rankall <- function(outcome, num = "best") {
  ## Map each valid outcome name to its column in the mortality file.
  outcome_cols <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  if (!(outcome %in% names(outcome_cols))) {
    stop("invalid outcome")
  }
  outcome_col <- outcome_cols[[outcome]]

  fulldata <- read.csv("outcome-of-care-measures.csv", colClasses = "character",
                       na.strings = "Not Available")

  unique_states <- sort(unique(fulldata$State))
  ## Preallocate instead of growing with c() on every iteration.
  hospitals <- character(length(unique_states))

  for (i in seq_along(unique_states)) {
    state <- unique_states[i]
    ## Keep hospital name (col 2) + outcome rate for this state, drop NAs.
    data <- na.omit(fulldata[fulldata$State == state, c(2, outcome_col)])
    rate <- as.numeric(data[[2]])
    ## Single two-key sort: by rate, ties broken alphabetically by name.
    ## (The original passed a one-column data.frame to order() and relied
    ## on two sequential sorts being stable; `[[` + two keys is explicit.)
    data <- data[order(rate, data$Hospital.Name), ]
    if (num == "best") {
      hospitals[i] <- data$Hospital.Name[1]
    } else if (num == "worst") {
      hospitals[i] <- data$Hospital.Name[nrow(data)]
    } else {
      ## Out-of-range rank indexes past the end and yields NA, as intended.
      hospitals[i] <- data$Hospital.Name[num]
    }
  }

  return(data.frame(hospital = hospitals, state = unique_states))
} | /R Programming/HomeWork3/rankall.R | no_license | pierorex/datasciencecoursera | R | false | false | 1,957 | r | rankall <- function(outcome, num = "best") {
## Read outcome data
## Check that state and outcome are valid
## For each state, find the hospital of the given rank
## Return a data frame with the hospital names and the
## (abbreviated) state name
## Filter by given outcome
if (outcome == "heart attack") {
outcome_field = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack"
outcome_col = 11
}
else if (outcome == "heart failure") {
outcome_field = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure"
outcome_col = 17
}
else if (outcome == "pneumonia") {
outcome_field = "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia"
outcome_col = 23
}
else { stop("invalid outcome") }
## Read data and filter by given state
fulldata = read.csv("outcome-of-care-measures.csv", colClasses = "character",
na.strings='Not Available')
fulldata = fulldata[order(fulldata$State), ]
unique_states = unique(fulldata$State)
hospitals = c()
for (state in unique_states) {
data = na.omit(fulldata[fulldata$State==state, c(2, outcome_col)])
## Convert to numeric all required values to compute the minimum
data[outcome_field] = lapply(data[outcome_field], as.numeric)
## Sort data by name
data = data[order(data$Hospital.Name), ]
## Sort data by outcome_field
data = data[order(data[outcome_field]), ]
## Special cases for 'best' and 'worst'
if (num == 'best') {
hospitals = c(hospitals, data$Hospital.Name[1])
}
else if (num == 'worst') {
hospitals = c(hospitals, data$Hospital.Name[nrow(data)])
}
else {
hospitals = c(hospitals, data$Hospital.Name[num])
}
}
return (data.frame(hospital=hospitals, state=unique_states))
} |
Server <- function(input, output) {
Output$price_hist <- renderPlot(ggplot2::qplot(bcl$Price))
Output$bcl_data <- renderTable ({
Bcl
})
}
sliderInput()
NEED FOR A WIDGET:
inputId – unique identifier.
label – “title” to the widget.
others specific to the widget.
sliderInput("priceInput", "Select your desired price range.",
min = 0, max = 100, value = c(15, 30), pre="$")
library(shiny)
bcl
| /cm107/Untitled.R | no_license | katiezinn/Stat545_Participation | R | false | false | 429 | r | Server <- function(input, output) {
Output$price_hist <- renderPlot(ggplot2::qplot(bcl$Price))
Output$bcl_data <- renderTable ({
Bcl
})
}
sliderInput()
NEED FOR A WIDGET:
inputId – unique identifier.
label – “title” to the widget.
others specific to the widget.
sliderInput("priceInput", "Select your desired price range.",
min = 0, max = 100, value = c(15, 30), pre="$")
library(shiny)
bcl
|
\docType{data}
\name{ices19502010}
\alias{ices19502010}
\title{Some title}
\format{Some format}
\source{
Some source
}
\description{
Some description
}
\author{
Some author
}
\keyword{datasets}
| /man/ices19502010.Rd | no_license | einarhjorleifsson/landr | R | false | false | 201 | rd | \docType{data}
\name{ices19502010}
\alias{ices19502010}
\title{Some title}
\format{Some format}
\source{
Some source
}
\description{
Some description
}
\author{
Some author
}
\keyword{datasets}
|
#' @title getIdiomas
#' @description Extract Languages from XML file converted to R list.
#' @param curriculo XML exported from Lattes imported to R as list.
#' @return data frame
#' @details Curriculum without this information will return NULL.
#' @examples
#' if(interactive()){
#' data(xmlsLattes)
#' # to import from one curriculum
#' getIdiomas(xmlsLattes[[2]])
#'
#' # to import from two or more curricula
#' lt <- lapply(xmlsLattes, getIdiomas)
#' head(bind_rows(lt))
#' }
#' @rdname getIdiomas
#' @export
getIdiomas <- function(curriculo){
    # Languages live under the `DADOS-GERAIS` -> `IDIOMAS` node of the
    # Lattes curriculum list.
    ll <- curriculo$`DADOS-GERAIS`
    # Curricula without a general-data list, or without an IDIOMAS entry,
    # carry no language information: return NULL, as documented.
    if (!is.list(ll) || !('IDIOMAS' %in% names(ll))) {
        return(NULL)
    }
    ll <- ll$`IDIOMAS`
    # Guard against an empty IDIOMAS node. The original code skipped the
    # extraction in this case but still evaluated `idm$id <- ...`, raising
    # "object 'idm' not found"; returning NULL keeps the documented contract.
    if (length(ll) < 1) {
        return(NULL)
    }
    # One attribute set per language entry: extract each non-NULL entry
    # and bind the per-language rows into a single data frame.
    idm <- lapply(ll, function(x){ if (!is.null(x)) { getCharacter(x) } })
    idm <- bind_rows(idm)
    # Tag every row with the curriculum id so results from several
    # curricula can be combined downstream.
    idm$id <- curriculo$id
    return(idm)
}
| /R/getIdiomas.R | no_license | Webteg/getLattes | R | false | false | 975 | r | #' @title getIdiomas
#' @description Extract Languages from XML file converted to R list.
#' @param curriculo XML exported from Lattes imported to R as list.
#' @return data frame
#' @details Curriculum without this information will return NULL.
#' @examples
#' if(interactive()){
#' data(xmlsLattes)
#' # to import from one curriculum
#' getIdiomas(xmlsLattes[[2]])
#'
#' # to import from two or more curricula
#' lt <- lapply(xmlsLattes, getIdiomas)
#' head(bind_rows(lt))
#' }
#' @rdname getIdiomas
#' @export
getIdiomas <- function(curriculo){
#print(curriculo$id)
ll <- curriculo$`DADOS-GERAIS`
if(is.list(ll)){
if(any(names(ll) %in% 'IDIOMAS')){
ll <- ll$`IDIOMAS`
if(length(ll) >=1){
idm <- lapply(ll, function(x){ if(!is.null(x)){ getCharacter(x)} } )
idm <- bind_rows(idm)
}
idm$id <- curriculo$id
return(idm)
}
idm <- NULL
return(idm)
} else{
idm <- NULL
return(idm)
}
}
|
#logging
log <- file(snakemake@log[[1]], open="wt")
sink(log, append=TRUE)
sink(log, append=TRUE, type="message")
# Restore output to console
#sink()
#sink(type="message")
#backup R-based installation if conda didn't work or wasn't used
#we check if packages are installed first
# list of bioconductor packages
#bioc_packages = c("GenomicFeatures", "tximport", "rhdf5", "DESeq2")
# load or install&load all
#package.check <- lapply(
# bioc_packages,
# FUN = function(x) {
# if (!require(x, character.only = TRUE)) {
# BiocManager::install(x)
# library(x, character.only = TRUE)
# }
# }
#)
# list of cran packages
#cran_packages = c("readr")
# load or install&load all
#package.check <- lapply(
# cran_packages,
# FUN = function(x) {
# if (!require(x, character.only = TRUE)) {
# install.packages(x, dependencies = TRUE, repos = "http://cran.us.r-project.org")
# library(x, character.only = TRUE)
# }
# }
#)
#load libraries
library(GenomicFeatures)
library(tximport)
library(rhdf5)
library(readr)
library(DESeq2)
# Build a transcript-to-gene map from the annotation supplied by snakemake.
# NOTE(review): the `_snakemake.wildcards` suffix on every variable looks
# like a failed template substitution (a wildcard value was presumably meant
# to be spliced into the names) -- confirm against the Snakefile.
txdb_snakemake.wildcards <- makeTxDbFromGFF(snakemake@input[["annotation"]], format="auto")
k_snakemake.wildcards <- keys(txdb_snakemake.wildcards, keytype = "TXNAME")
# `select` here resolves to AnnotationDbi::select (via GenomicFeatures);
# if dplyr is ever attached later this call would be masked -- consider
# qualifying it as AnnotationDbi::select.
tx2gene_snakemake.wildcards <- select(txdb_snakemake.wildcards, k_snakemake.wildcards, "GENEID", "TXNAME")
#user choice for whether gene or transcript level analysis should be performed
quantification_level <- as.character(snakemake@params[["quantification_level"]])
print(quantification_level)
# NOTE(review): the next line is a comparison whose result is discarded --
# a leftover debug statement with no effect; candidate for removal.
quantification_level == "NO"
# Restrict the sample sheet to the species being processed and derive the
# per-sample kallisto output paths (<sample>_<unit>/abundance.h5).
samples <- read.delim("config/samples.tsv")
samples_snakemake.wildcards <- samples[samples$species == snakemake@params[["species"]], ]
sample_snakemake.wildcards_names <- paste0(samples_snakemake.wildcards$sample, "_", samples_snakemake.wildcards$unit)
# NOTE(review): `snakemake@wildcards` is used whole here rather than a named
# element like snakemake@wildcards[["species"]] -- verify this yields the
# intended directory component.
files_snakemake.wildcards <- file.path("kallisto_quant", snakemake@wildcards, sample_snakemake.wildcards_names, "abundance.h5")
names(files_snakemake.wildcards) <- paste0(sample_snakemake.wildcards_names)
if (quantification_level == "NO") {
#if user chose gene level analysis
txi.kallisto_snakemake.wildcards <- tximport(files_snakemake.wildcards, type = "kallisto", tx2gene = tx2gene_snakemake.wildcards, txOut = FALSE, importer = readr::read_tsv)
} else {
#else just output/use transcript-level - txOut = TRUE
txi.kallisto_snakemake.wildcards <- tximport(files_snakemake.wildcards, type = "kallisto", txOut = TRUE, importer = readr::read_tsv)
}
#sampleTable <- samples[1:8,"condition", drop=FALSE]
# Align sample metadata rows with the count matrix columns, build the
# DESeq2 dataset (design: ~condition) and serialize it for downstream rules.
rownames(samples_snakemake.wildcards) <- colnames(txi.kallisto_snakemake.wildcards$counts)
DESeqDataSet_snakemake.wildcards <- DESeqDataSetFromTximport(txi.kallisto_snakemake.wildcards, samples_snakemake.wildcards, ~condition)
saveRDS(DESeqDataSet_snakemake.wildcards, file=snakemake@output[[1]])
| /workflow/scripts/tximport.R | permissive | tgstoecker/A2TEA.Workflow | R | false | false | 2,817 | r | #logging
log <- file(snakemake@log[[1]], open="wt")
sink(log, append=TRUE)
sink(log, append=TRUE, type="message")
# Restore output to console
#sink()
#sink(type="message")
#backup R-based installation if conda didn't work or wasn't used
#we check if packages are installed first
# list of bioconductor packages
#bioc_packages = c("GenomicFeatures", "tximport", "rhdf5", "DESeq2")
# load or install&load all
#package.check <- lapply(
# bioc_packages,
# FUN = function(x) {
# if (!require(x, character.only = TRUE)) {
# BiocManager::install(x)
# library(x, character.only = TRUE)
# }
# }
#)
# list of cran packages
#cran_packages = c("readr")
# load or install&load all
#package.check <- lapply(
# cran_packages,
# FUN = function(x) {
# if (!require(x, character.only = TRUE)) {
# install.packages(x, dependencies = TRUE, repos = "http://cran.us.r-project.org")
# library(x, character.only = TRUE)
# }
# }
#)
#load libraries
library(GenomicFeatures)
library(tximport)
library(rhdf5)
library(readr)
library(DESeq2)
txdb_snakemake.wildcards <- makeTxDbFromGFF(snakemake@input[["annotation"]], format="auto")
k_snakemake.wildcards <- keys(txdb_snakemake.wildcards, keytype = "TXNAME")
tx2gene_snakemake.wildcards <- select(txdb_snakemake.wildcards, k_snakemake.wildcards, "GENEID", "TXNAME")
#user choice for whether gene or transcript level analysis should be performed
quantification_level <- as.character(snakemake@params[["quantification_level"]])
print(quantification_level)
quantification_level == "NO"
samples <- read.delim("config/samples.tsv")
samples_snakemake.wildcards <- samples[samples$species == snakemake@params[["species"]], ]
sample_snakemake.wildcards_names <- paste0(samples_snakemake.wildcards$sample, "_", samples_snakemake.wildcards$unit)
files_snakemake.wildcards <- file.path("kallisto_quant", snakemake@wildcards, sample_snakemake.wildcards_names, "abundance.h5")
names(files_snakemake.wildcards) <- paste0(sample_snakemake.wildcards_names)
if (quantification_level == "NO") {
#if user chose gene level analysis
txi.kallisto_snakemake.wildcards <- tximport(files_snakemake.wildcards, type = "kallisto", tx2gene = tx2gene_snakemake.wildcards, txOut = FALSE, importer = readr::read_tsv)
} else {
#else just output/use transcript-level - txOut = TRUE
txi.kallisto_snakemake.wildcards <- tximport(files_snakemake.wildcards, type = "kallisto", txOut = TRUE, importer = readr::read_tsv)
}
#sampleTable <- samples[1:8,"condition", drop=FALSE]
rownames(samples_snakemake.wildcards) <- colnames(txi.kallisto_snakemake.wildcards$counts)
DESeqDataSet_snakemake.wildcards <- DESeqDataSetFromTximport(txi.kallisto_snakemake.wildcards, samples_snakemake.wildcards, ~condition)
saveRDS(DESeqDataSet_snakemake.wildcards, file=snakemake@output[[1]])
|
library('DESeq2')
library('RColorBrewer')
library('gplots')
library('reshape')
library('ggplot2')
## grab args
args <- commandArgs(trailingOnly = TRUE)
DIR <- args[1]
FILE1 <- args[2]
FILE2 <- args[3]
ANNOTATE <- args[4]
CONTRASTS <- args[5]
#
setwd(DIR)
## Sys.setenv("DISPLAY"=":0.0")
sampleinfo=read.delim(FILE1)
sampleFiles=as.character(sampleinfo[,2])
x = read.delim(FILE2,row.names=1)
colnames(x)=as.character(sampleinfo[,4])
## read annotation file
## ann=read.delim(ANNOTATE)
#
ddsHTSeq<-DESeqDataSetFromMatrix(countData=x,colData=sampleinfo, design=~condition)
dds<-DESeq(ddsHTSeq)
ndata=as.data.frame(counts(dds,normalized=TRUE))
colnames(ndata)=colnames(x)
write.table(ndata,file="Deseq2_normalized_counts.txt",sep="\t",col.names=NA)
png("HistDesq2normFilter.png")
df.m <- melt(as.data.frame(ndata))
print(ggplot(df.m) + geom_density(aes(x = value, colour = variable)) + labs(x = NULL) + theme(legend.position='top') + scale_x_log10())
dev.off()
#
# CONTRASTS is a space-separated flat list of condition names taken pairwise:
# elements (1,2), (3,4), ... each define one "A vs B" comparison.
contras=unlist(strsplit(CONTRASTS, split=" "))
cat(contras,"\t",length(contras),"\t",contras[1],"\t",contras[2],"\n",file="readcontra.txt")
for(i in seq(1, length(contras), by = 2))
{{
# Differential-expression results for this pair, ordered by adjusted p-value.
res<-results(dds,contrast=c("condition",as.character(contras[i]),as.character(contras[i+1])))
res<-res[order(res$padj),]
res1=as.data.frame(res)
restmp=res1
# Convert log2 fold change to a signed linear fold change:
# 2^lfc when up, -1/2^lfc when down (so e.g. lfc = -1 reports -2).
restmp$FoldChange <- ifelse(restmp$log2FoldChange<0, -1/(2^restmp$log2FoldChange), 2^restmp$log2FoldChange)
write.table(restmp,file=paste("DEG_",contras[i],"_vs_",contras[i+1],".txt",sep=""),sep="\t",col.names=NA)
# y-axis limits for the MA plot from the non-NA fold changes.
x=res1$log2FoldChange[which(!is.na(res1$log2FoldChange))]
png(paste("MAplot_",contras[i],"_vs_",contras[i+1],".png",sep=""))
plotMA(res,ylim=range(x),main=paste("MAplot_",contras[i],"_vs_",contras[i+1],sep=""))
##dev.copy(png,paste("MAplot_",contras[i],"_vs_",contras[i+1],".png",sep=""))
dev.off()
# The doubled braces are just nested R blocks (harmless); they likely come
# from escaping {} in a pipeline template.
}}
## transformation
rld <- rlogTransformation(dds, blind=TRUE)
rldm=assay(rld)
colnames(rldm)=colnames(x)
write.table(rldm,file="Deseq2_normalized_rld.txt",sep="\t",col.names=NA)
## clustering
hmcol <- colorRampPalette(brewer.pal(9, "GnBu"))(100)
distsRL <- dist(t(assay(rld)))
mat <- as.matrix(distsRL)
rownames(mat) <- colnames(mat) <- with(colData(dds),paste(condition,sampleFiles , sep=" : "))
#if you just want the conditions use this line : rownames(mat) <- colnames(mat) <- with(colData(dds),condition)
png("deseq2_heatmaps_samplebysample.png")
heatmap.2(mat, trace="none", col = rev(hmcol), margin=c(16, 16))
## dev.copy(png,"deseq2_heatmaps_samplebysample.png")
dev.off()
## plotMA(dds,ylim=c(-2,2),main="DESeq2 MAplot")
## dev.copy(png,"deseq2_MAplot.png")
## dev.off()
## pca
png("deseq2_pca.png")
print(plotPCA(rld, intgroup=c("condition")))
## dev.copy(png,"deseq2_pca.png")
dev.off()
png("deseq2_pca_details.png")
# print(plotPCA(rld, intgroup=c("condition","fileName")))
print(plotPCA(rld, intgroup=c("condition","label")))
## dev.copy(png,"deseq2_pca_details.png")
dev.off()
| /Results-template/Scripts/deseq2.R | no_license | CCBR/Pipeliner | R | false | false | 2,921 | r | library('DESeq2')
library('RColorBrewer')
library('gplots')
library('reshape')
library('ggplot2')
## grab args
args <- commandArgs(trailingOnly = TRUE)
DIR <- args[1]
FILE1 <- args[2]
FILE2 <- args[3]
ANNOTATE <- args[4]
CONTRASTS <- args[5]
#
setwd(DIR)
## Sys.setenv("DISPLAY"=":0.0")
sampleinfo=read.delim(FILE1)
sampleFiles=as.character(sampleinfo[,2])
x = read.delim(FILE2,row.names=1)
colnames(x)=as.character(sampleinfo[,4])
## read annotation file
## ann=read.delim(ANNOTATE)
#
ddsHTSeq<-DESeqDataSetFromMatrix(countData=x,colData=sampleinfo, design=~condition)
dds<-DESeq(ddsHTSeq)
ndata=as.data.frame(counts(dds,normalized=TRUE))
colnames(ndata)=colnames(x)
write.table(ndata,file="Deseq2_normalized_counts.txt",sep="\t",col.names=NA)
png("HistDesq2normFilter.png")
df.m <- melt(as.data.frame(ndata))
print(ggplot(df.m) + geom_density(aes(x = value, colour = variable)) + labs(x = NULL) + theme(legend.position='top') + scale_x_log10())
dev.off()
#
contras=unlist(strsplit(CONTRASTS, split=" "))
cat(contras,"\t",length(contras),"\t",contras[1],"\t",contras[2],"\n",file="readcontra.txt")
for(i in seq(1, length(contras), by = 2))
{{
res<-results(dds,contrast=c("condition",as.character(contras[i]),as.character(contras[i+1])))
res<-res[order(res$padj),]
res1=as.data.frame(res)
restmp=res1
restmp$FoldChange <- ifelse(restmp$log2FoldChange<0, -1/(2^restmp$log2FoldChange), 2^restmp$log2FoldChange)
write.table(restmp,file=paste("DEG_",contras[i],"_vs_",contras[i+1],".txt",sep=""),sep="\t",col.names=NA)
x=res1$log2FoldChange[which(!is.na(res1$log2FoldChange))]
png(paste("MAplot_",contras[i],"_vs_",contras[i+1],".png",sep=""))
plotMA(res,ylim=range(x),main=paste("MAplot_",contras[i],"_vs_",contras[i+1],sep=""))
##dev.copy(png,paste("MAplot_",contras[i],"_vs_",contras[i+1],".png",sep=""))
dev.off()
}}
## transformation
rld <- rlogTransformation(dds, blind=TRUE)
rldm=assay(rld)
colnames(rldm)=colnames(x)
write.table(rldm,file="Deseq2_normalized_rld.txt",sep="\t",col.names=NA)
## clustering
hmcol <- colorRampPalette(brewer.pal(9, "GnBu"))(100)
distsRL <- dist(t(assay(rld)))
mat <- as.matrix(distsRL)
rownames(mat) <- colnames(mat) <- with(colData(dds),paste(condition,sampleFiles , sep=" : "))
#if you just want the conditions use this line : rownames(mat) <- colnames(mat) <- with(colData(dds),condition)
png("deseq2_heatmaps_samplebysample.png")
heatmap.2(mat, trace="none", col = rev(hmcol), margin=c(16, 16))
## dev.copy(png,"deseq2_heatmaps_samplebysample.png")
dev.off()
## plotMA(dds,ylim=c(-2,2),main="DESeq2 MAplot")
## dev.copy(png,"deseq2_MAplot.png")
## dev.off()
## pca
png("deseq2_pca.png")
print(plotPCA(rld, intgroup=c("condition")))
## dev.copy(png,"deseq2_pca.png")
dev.off()
png("deseq2_pca_details.png")
# print(plotPCA(rld, intgroup=c("condition","fileName")))
print(plotPCA(rld, intgroup=c("condition","label")))
## dev.copy(png,"deseq2_pca_details.png")
dev.off()
|
# ============================= #
# Student Name: Weixuezi WANG #
# ============================= #
rm(list = ls())
# Load the necessary package in R
install.packages("lexicon")
install.packages("sentimentr")
library("text2vec")
library("NLP")
library("tm")
library("lexicon")
library("sentimentr")
library("DBI")
library("RMySQL")
library("e1071")
library("tokenizers")
############################
# Sentiment Classification #
############################
# ------------------------#
# Input the Raw Dataset #
# ------------------------#
# Connection parameters for the course MySQL server.
# SECURITY NOTE(review): credentials are hard-coded in the script; they should
# be read from a config file or environment variables instead of committed here.
myhost <- "localhost"
mydb <- "studb"
myacct <- "cis434"
mypwd <- "LLhtFPbdwiJans8F@S207"
driver <- dbDriver("MySQL")
# The two unnamed arguments are matched positionally -- presumably to the
# username and password parameters of the MySQL method; confirm against RMySQL.
conn <- dbConnect(driver, host=myhost, dbname=mydb, myacct, mypwd)
# Pull this assignment's slice of the table; rtag identifies the cohort.
airline_raw <- dbGetQuery(conn, "SELECT * FROM classification WHERE rtag = 'cC|q8z29^Zio'")
# Keep columns 1, 3, 5 -- presumably id and tweet among them; verify against
# the classification table schema.
airline <- airline_raw[,c(1,3,5)]
# --------------------#
# Clean the Dataset #
# --------------------#
# Lower-case the raw tweet once so all later matching is case-insensitive.
airline$tweet <- tolower(airline$tweet)
# Strip URLs first, then punctuation, then control characters.
airline$text <- gsub("http\\S+\\s*", " ", airline$tweet)
# FIX: the original pattern "[#|@|-|&|.|!|?]" was written like an alternation,
# but inside [ ] the "|" separators are literal characters and the "|-|" run
# forms a degenerate range, so the literal "-" the author apparently wanted to
# strip was never matched.  The class below removes # @ & . ! ? and "-"
# (plus "|", which the old class also removed).
airline$text <- gsub("[#@&.!?|-]", " ", airline$text)
airline$text <- gsub("[[:cntrl:]]", " ", airline$text)
# ----------------------------------------------------- #
#   Use Lexicon to find the positive and negative word  #
# ----------------------------------------------------- #
# hash_sentiment_jockers_rinker is a data.table with columns x (term) and
# y (polarity score); the subsets below rely on data.table's NSE for `y`.
sentiment_word <- lexicon::hash_sentiment_jockers_rinker
positive <- sentiment_word[y > 0, 1]
negative <- sentiment_word[y < 0, 1]
# Hand-curated negative terms the lexicon misses.  NOTE(review): multi-word
# entries such as "why not" and "no tv" can never match the single-word
# tokens produced later -- confirm whether they are intentional.
addneg<- c("horribly","terrible","worse","shout","late","worst","bad","seriously","why not","fail","shit","shatterd","awful","shame","terrible",
           "stuck","sigh","force","inconvenient","sad","wasted","not happy","unitedsucks","sucks","disappointed","frustrated","screwed","screw",
           "screwing","poor","incompetent","rude","disrespectful","ridiculous","motherfuckers","strand","robot","unacceptable",
           "again","thieves","delay","delayed","no tv")
# transform the data into vector, in order to be used in function 'match'
positive <- unlist(as.list(positive))
negative <- unlist(c(as.list(negative),addneg))
# ------------------------------------------------ #
# Create a function to create the sentiment score #
# ------------------------------------------------ #
require(plyr)
require(stringr)
# Lexicon-based sentiment score for each element of `text`:
#   score = (# distinct tokens found in `positive`)
#         - (# distinct tokens found in `negative`)
# Note: create_vocabulary() yields *unique* terms, so a word repeated within
# one document is counted once -- this mirrors the original behaviour.
#
# Args:
#   text:     character vector of documents (one score per element).
#   positive: character vector of positive words.
#   negative: character vector of negative words.
# Returns: numeric vector of scores, same length as `text`.
scoresentiment <- function(text, positive, negative){
  scores <- laply(text, function(doc, positive, negative) {
    doc <- tolower(doc)  # matching below is case-sensitive
    # Tokenise with text2vec and take the unique terms of the document.
    tokens <- itoken(doc, preprocessor = tolower, tokenizer = word_tokenizer)
    vocab <- create_vocabulary(tokens)[, 1]
    words <- unlist(vocab)
    # match() gives NA for misses; !is.na() -> TRUE/FALSE, summed as 1/0.
    pos.hits <- !is.na(match(words, positive))
    neg.hits <- !is.na(match(words, negative))
    sum(pos.hits) - sum(neg.hits)
  }, positive, negative)
  # FIX: the original body ended on an assignment, so the value was returned
  # *invisibly*; return it explicitly.
  return(scores)
}
# ------------------------------------------------ #
#  Score every tweet and label the non-complaints  #
# ------------------------------------------------ #
# scoresentiment() already iterates over its first argument via laply(), so a
# single vectorised call replaces the original element-by-element loop, which
# also had the row count (4583) hard-coded.
airline$sentiscore <- scoresentiment(airline$text, positive, negative)
# A tweet counts as "no complaint" (1) when its lexicon score exceeds 2.
airline$sentiment <- ifelse(airline$sentiscore > 2, 1, 0)
# Check whether the assigned sentiment is accurate
airline[airline$sentiment==1,]
# Count the number of no complaint
nrow(airline[airline$sentiment==1,])
not_complaints <- airline[airline$sentiment==1, c("id","tweet")]
write.csv(not_complaints,"not_complaints.csv")
# ============== IF we use the package sentimentr to find non complaint tweet ========== #
# --------------------------------------------------#
#   Use Sentimentr to Give each Sentence a Polarity #
# --------------------------------------------------#
# NOTE(review): sentimentr::sentiment() scores *sentences* (one row per
# sentence, keyed by element_id), so the direct column assignment below lines
# up only if every tweet is a single sentence -- confirm, or aggregate per
# tweet with sentiment_by().
sentiment_all <- sentiment(airline$text, polarity_dt = lexicon::hash_sentiment_jockers_rinker,
                           valence_shifters_dt = lexicon::hash_valence_shifters, hyphen = "",
                           amplifier.weight = 0.8, n.before = 1, n.after = 1,
                           question.weight = 1, adversative.weight = 0.85,
                           neutral.nonverb.like = FALSE, missing_value = 0)
airline_new <- airline
airline_new$sentiscore <- sentiment_all$sentiment
# Assign 1 to positive sentiment and 0 to negative sentiment. The cutoff of polarity is 0.5
airline_new$sentiment <- ifelse(airline_new$sentiscore > 0.5, 1, 0)
# Check whether the assigned sentiment is accurate
airline_new[airline_new$sentiment==1,]
# Count the number of no complaint
nrow(airline_new[airline_new$sentiment==1,])
not_complaints_new <- airline_new[airline_new$sentiment==1, c("id","tweet")]
write.csv(not_complaints_new,"not_complaints_with package.csv")
| /R_Text Mining and Social Media Analytics/Weixuezi_WANG_1_Sentiment Classification copy.R | no_license | theYukii/MakeBetterTheBest | R | false | false | 4,961 | r | # ============================= #
# Student Name: Weixuezi WANG #
# ============================= #
rm(list = ls())
# Load the necessary package in R
install.packages("lexicon")
install.packages("sentimentr")
library("text2vec")
library("NLP")
library("tm")
library("lexicon")
library("sentimentr")
library("DBI")
library("RMySQL")
library("e1071")
library("tokenizers")
############################
# Sentiment Classification #
############################
# ------------------------#
# Input the Raw Dataset #
# ------------------------#
# Connection parameters for the course MySQL server.
# SECURITY NOTE(review): credentials are hard-coded in the script; they should
# be read from a config file or environment variables instead of committed here.
myhost <- "localhost"
mydb <- "studb"
myacct <- "cis434"
mypwd <- "LLhtFPbdwiJans8F@S207"
driver <- dbDriver("MySQL")
# The two unnamed arguments are matched positionally -- presumably to the
# username and password parameters of the MySQL method; confirm against RMySQL.
conn <- dbConnect(driver, host=myhost, dbname=mydb, myacct, mypwd)
# Pull this assignment's slice of the table; rtag identifies the cohort.
airline_raw <- dbGetQuery(conn, "SELECT * FROM classification WHERE rtag = 'cC|q8z29^Zio'")
# Keep columns 1, 3, 5 -- presumably id and tweet among them; verify against
# the classification table schema.
airline <- airline_raw[,c(1,3,5)]
# --------------------#
# Clean the Dataset #
# --------------------#
# Lower-case the raw tweet once so all later matching is case-insensitive.
airline$tweet <- tolower(airline$tweet)
# Strip URLs first, then punctuation, then control characters.
airline$text <- gsub("http\\S+\\s*", " ", airline$tweet)
# FIX: the original pattern "[#|@|-|&|.|!|?]" was written like an alternation,
# but inside [ ] the "|" separators are literal characters and the "|-|" run
# forms a degenerate range, so the literal "-" the author apparently wanted to
# strip was never matched.  The class below removes # @ & . ! ? and "-"
# (plus "|", which the old class also removed).
airline$text <- gsub("[#@&.!?|-]", " ", airline$text)
airline$text <- gsub("[[:cntrl:]]", " ", airline$text)
# ----------------------------------------------------- #
#   Use Lexicon to find the positive and negative word  #
# ----------------------------------------------------- #
# hash_sentiment_jockers_rinker is a data.table with columns x (term) and
# y (polarity score); the subsets below rely on data.table's NSE for `y`.
sentiment_word <- lexicon::hash_sentiment_jockers_rinker
positive <- sentiment_word[y > 0, 1]
negative <- sentiment_word[y < 0, 1]
# Hand-curated negative terms the lexicon misses.  NOTE(review): multi-word
# entries such as "why not" and "no tv" can never match the single-word
# tokens produced later -- confirm whether they are intentional.
addneg<- c("horribly","terrible","worse","shout","late","worst","bad","seriously","why not","fail","shit","shatterd","awful","shame","terrible",
           "stuck","sigh","force","inconvenient","sad","wasted","not happy","unitedsucks","sucks","disappointed","frustrated","screwed","screw",
           "screwing","poor","incompetent","rude","disrespectful","ridiculous","motherfuckers","strand","robot","unacceptable",
           "again","thieves","delay","delayed","no tv")
# transform the data into vector, in order to be used in function 'match'
positive <- unlist(as.list(positive))
negative <- unlist(c(as.list(negative),addneg))
# ------------------------------------------------ #
# Create a function to create the sentiment score #
# ------------------------------------------------ #
require(plyr)
require(stringr)
# Lexicon-based sentiment score for each element of `text`:
#   score = (# distinct tokens found in `positive`)
#         - (# distinct tokens found in `negative`)
# Note: create_vocabulary() yields *unique* terms, so a word repeated within
# one document is counted once -- this mirrors the original behaviour.
#
# Args:
#   text:     character vector of documents (one score per element).
#   positive: character vector of positive words.
#   negative: character vector of negative words.
# Returns: numeric vector of scores, same length as `text`.
scoresentiment <- function(text, positive, negative){
  scores <- laply(text, function(doc, positive, negative) {
    doc <- tolower(doc)  # matching below is case-sensitive
    # Tokenise with text2vec and take the unique terms of the document.
    tokens <- itoken(doc, preprocessor = tolower, tokenizer = word_tokenizer)
    vocab <- create_vocabulary(tokens)[, 1]
    words <- unlist(vocab)
    # match() gives NA for misses; !is.na() -> TRUE/FALSE, summed as 1/0.
    pos.hits <- !is.na(match(words, positive))
    neg.hits <- !is.na(match(words, negative))
    sum(pos.hits) - sum(neg.hits)
  }, positive, negative)
  # FIX: the original body ended on an assignment, so the value was returned
  # *invisibly*; return it explicitly.
  return(scores)
}
# ------------------------------------------------ #
#  Score every tweet and label the non-complaints  #
# ------------------------------------------------ #
# scoresentiment() already iterates over its first argument via laply(), so a
# single vectorised call replaces the original element-by-element loop, which
# also had the row count (4583) hard-coded.
airline$sentiscore <- scoresentiment(airline$text, positive, negative)
# A tweet counts as "no complaint" (1) when its lexicon score exceeds 2.
airline$sentiment <- ifelse(airline$sentiscore > 2, 1, 0)
# Check whether the assigned sentiment is accurate
airline[airline$sentiment==1,]
# Count the number of no complaint
nrow(airline[airline$sentiment==1,])
not_complaints <- airline[airline$sentiment==1, c("id","tweet")]
write.csv(not_complaints,"not_complaints.csv")
# ============== IF we use the package sentimentr to find non complaint tweet ========== #
# --------------------------------------------------#
#   Use Sentimentr to Give each Sentence a Polarity #
# --------------------------------------------------#
# NOTE(review): sentimentr::sentiment() scores *sentences* (one row per
# sentence, keyed by element_id), so the direct column assignment below lines
# up only if every tweet is a single sentence -- confirm, or aggregate per
# tweet with sentiment_by().
sentiment_all <- sentiment(airline$text, polarity_dt = lexicon::hash_sentiment_jockers_rinker,
                           valence_shifters_dt = lexicon::hash_valence_shifters, hyphen = "",
                           amplifier.weight = 0.8, n.before = 1, n.after = 1,
                           question.weight = 1, adversative.weight = 0.85,
                           neutral.nonverb.like = FALSE, missing_value = 0)
airline_new <- airline
airline_new$sentiscore <- sentiment_all$sentiment
# Assign 1 to positive sentiment and 0 to negative sentiment. The cutoff of polarity is 0.5
airline_new$sentiment <- ifelse(airline_new$sentiscore > 0.5, 1, 0)
# Check whether the assigned sentiment is accurate
airline_new[airline_new$sentiment==1,]
# Count the number of no complaint
nrow(airline_new[airline_new$sentiment==1,])
not_complaints_new <- airline_new[airline_new$sentiment==1, c("id","tweet")]
write.csv(not_complaints_new,"not_complaints_with package.csv")
|
#
library(shiny)
library(deSolve)
# Define UI for application that draws a plots
# UI for the Chagas-model Shiny app.
# Fixes vs. the original:
#  * trailing commas removed inside sidebarPanel() and mainPanel() -- in R a
#    trailing comma passes an empty (missing) argument, so the app fails at
#    run time with "argument is missing, with no default";
#  * the bare positional `10000` in the "start_day" input (which was being
#    matched to `value`) is now named explicitly.
ui <- fluidPage(
  # App title ----
  titlePanel("Chagas Model with Treatment"),
  # Sidebar layout with input and output definitions ----
  sidebarLayout(
    # Sidebar panel for inputs ----
    sidebarPanel(
      sliderInput(inputId = "years",
                  label = "Number of years to run simulation",
                  min = 1, max = 100, value = 40),
      sliderInput(inputId = "m",
                  label = "Ratio of treated bugs:dogs",
                  min = 5, max = 80, value = 40),
      sliderInput(inputId = "MM",
                  label = "External Bug Pop- not effected by treatment",
                  min = 0, max = 20, value = 5),
      sliderInput(inputId = "r",
                  label = "Lifespan of dog in years",
                  min = 1, max = 10, value = 3),
      sliderInput(inputId = "p",
                  label = "Percentage of Dead bugs consumed by dog",
                  min = 0.02, max = 0.99, value = 0.8),
      numericInput(inputId = "start_day",
                   label = "Day to implement treatment",
                   value = 10000, min = 10000),
      numericInput(inputId = "number_treatments",
                   label = "Number of treatments",
                   value = 1),
      numericInput(inputId = "days_between_treatments",
                   label = "Days between treatments",
                   value = 90),
      sliderInput(inputId = "b",
                  label = "Transmission efficiency from bug to dog, vectorial",
                  min = 0.0005, max = 0.001, value = 0.00068),
      sliderInput(inputId = "c",
                  label = "Transmission efficiency from dog to bug",
                  min = 0.1, max = 0.5, value = 0.28),
      sliderInput(inputId = "g",
                  label = "Daily probabilty of vector mortality",
                  min = 0.001, max = 0.01, value = 0.005),
      sliderInput(inputId = "a",
                  label = "Bug biting frequency",
                  min = 1/21, max = 1/7, value = 1/14),
      sliderInput(inputId = "R",
                  label = "Bug birthrate",
                  min = 0.05, max = 0.12, value = 0.09),
      sliderInput(inputId = "k",
                  label = "Transmission efficiency from bug to dog, oral transmission",
                  min = 0.05, max = 0.12, value = 0.1)
    ),
    # Main panel for displaying outputs ----
    mainPanel(
      # Output: Proportions infected (Dogs and Bugs) ----
      plotOutput(outputId = "distPlot1")
      # plotOutput(outputId = "distPlot2")  # disabled: population crashes in this graph
    )
  )
)
| /NoDensityDeptDeathBugs/ui.R | no_license | jrokhsar/fluralaner-models | R | false | false | 3,171 | r |
#
library(shiny)
library(deSolve)
# Define UI for application that draws a plots
# UI for the Chagas-model Shiny app.
# Fixes vs. the original:
#  * trailing commas removed inside sidebarPanel() and mainPanel() -- in R a
#    trailing comma passes an empty (missing) argument, so the app fails at
#    run time with "argument is missing, with no default";
#  * the bare positional `10000` in the "start_day" input (which was being
#    matched to `value`) is now named explicitly.
ui <- fluidPage(
  # App title ----
  titlePanel("Chagas Model with Treatment"),
  # Sidebar layout with input and output definitions ----
  sidebarLayout(
    # Sidebar panel for inputs ----
    sidebarPanel(
      sliderInput(inputId = "years",
                  label = "Number of years to run simulation",
                  min = 1, max = 100, value = 40),
      sliderInput(inputId = "m",
                  label = "Ratio of treated bugs:dogs",
                  min = 5, max = 80, value = 40),
      sliderInput(inputId = "MM",
                  label = "External Bug Pop- not effected by treatment",
                  min = 0, max = 20, value = 5),
      sliderInput(inputId = "r",
                  label = "Lifespan of dog in years",
                  min = 1, max = 10, value = 3),
      sliderInput(inputId = "p",
                  label = "Percentage of Dead bugs consumed by dog",
                  min = 0.02, max = 0.99, value = 0.8),
      numericInput(inputId = "start_day",
                   label = "Day to implement treatment",
                   value = 10000, min = 10000),
      numericInput(inputId = "number_treatments",
                   label = "Number of treatments",
                   value = 1),
      numericInput(inputId = "days_between_treatments",
                   label = "Days between treatments",
                   value = 90),
      sliderInput(inputId = "b",
                  label = "Transmission efficiency from bug to dog, vectorial",
                  min = 0.0005, max = 0.001, value = 0.00068),
      sliderInput(inputId = "c",
                  label = "Transmission efficiency from dog to bug",
                  min = 0.1, max = 0.5, value = 0.28),
      sliderInput(inputId = "g",
                  label = "Daily probabilty of vector mortality",
                  min = 0.001, max = 0.01, value = 0.005),
      sliderInput(inputId = "a",
                  label = "Bug biting frequency",
                  min = 1/21, max = 1/7, value = 1/14),
      sliderInput(inputId = "R",
                  label = "Bug birthrate",
                  min = 0.05, max = 0.12, value = 0.09),
      sliderInput(inputId = "k",
                  label = "Transmission efficiency from bug to dog, oral transmission",
                  min = 0.05, max = 0.12, value = 0.1)
    ),
    # Main panel for displaying outputs ----
    mainPanel(
      # Output: Proportions infected (Dogs and Bugs) ----
      plotOutput(outputId = "distPlot1")
      # plotOutput(outputId = "distPlot2")  # disabled: population crashes in this graph
    )
  )
)
|
#-------------------------------------------------------------------------------
#Fits a Linear Mixed Model with as outcome PTSD severity as measured by the CAPS
#-------------------------------------------------------------------------------
#Data is read and libraries are loaded in Creating_data.R
source("Creating_data.R")
data<-create_data()
attach(data)
#Centering age and Sex
# Effect-code sex (geslacht): 1 -> -0.5, otherwise 0.5.  A vectorised ifelse()
# replaces the original row-by-row loop (same result; unlike the scalar if(),
# it also tolerates NA rows instead of erroring).
data$geslacht <- ifelse(data$geslacht == 1, -0.5, 0.5)
# Centre age (no rescaling) so model intercepts refer to the mean age.
data$Age <- scale(data$Age, center = TRUE, scale = FALSE)
#-------------------------------------------------------------------------------
#Uncomment to test whether outliers have effect on results
#-------------------------------------------------------------------------------
#To check whether outliers have an effect on our results. They don't :)
# upper<-quantile(data$tijd_IN_TK, na.rm = TRUE, probs=c(.25, .75))[2]+
# 1.5*IQR(data$tijd_IN_TK, na.rm = TRUE)
# lower<-quantile(data$tijd_IN_TK, na.rm = TRUE, probs=c(.25, .75))[1]-
# 1.5*IQR(data$tijd_IN_TK, na.rm = TRUE)
# ouliers_IN_TK<-c(which(data$tijd_IN_TK>upper),which(data$tijd_IN_TK<lower))
# upper<-quantile(data$tijd_TK_FU, na.rm = TRUE, probs=c(.25, .75))[2]+
# 1.5*IQR(data$tijd_TK_FU, na.rm = TRUE)
# lower<-quantile(data$tijd_TK_FU, na.rm = TRUE, probs=c(.25, .75))[1]-
# 1.5*IQR(data$tijd_TK_FU, na.rm = TRUE)
# ouliers_TK_FU<-c(which(data$tijd_TK_FU>upper),which(data$tijd_TK_FU<lower))
# all_outliers<-c(ouliers_IN_TK, ouliers_TK_FU)
# data<-data[-all_outliers,]
#-------------------------------------------------------------------------------
#Creating long dataset
#-------------------------------------------------------------------------------
# Reshape to long format: one row per (subject, time point); the three CAPS
# total-score columns are stacked into Time / CAPS_score.
long_data_2 <- pivot_longer(data = data,
                            cols = c("totaalscore_KIP_IN", "totaalscore_KIP_TK", "totaalscore_KIP_FU"),
                            names_to = "Time", values_to = "CAPS_score")
# Recode the source-column names into readable time-point labels ...
long_data_2$Time[long_data_2$Time == "totaalscore_KIP_IN"] <- "Intake"
long_data_2$Time[long_data_2$Time == "totaalscore_KIP_TK"] <- "Post-treatment"
long_data_2$Time[long_data_2$Time == "totaalscore_KIP_FU"] <- "6-months FU"
# ... and order the factor so Intake is the reference level.
long_data_2$Time <- factor(long_data_2$Time, levels = c("Intake", "Post-treatment", "6-months FU"))
# Comorbid-depression indicator as a factor (levels 0/1).
long_data_2$binarydepIN <- factor(long_data_2$binarydepIN)
#-------------------------------------------------------------------------------
#Choosing model
#-------------------------------------------------------------------------------
#Elaborate mean model to determine random effects structure:
# REML fits are appropriate here: the candidate models share the fixed part
# and differ only in the random-effects structure.
LMM_2.1<-lme(CAPS_score~Time*geslacht*binarydepIN*(I(Age) +I(Age^2)), random=~Time|CIN,
             data=long_data_2, na.action=na.exclude, method = "REML")
summary(LMM_2.1)$tTable
LMM_2.2 <- lme(CAPS_score~Time*geslacht*binarydepIN*(I(Age) +I(Age^2)), random=~1|CIN,
               data=long_data_2, na.action=na.exclude, method = "REML")
anova(LMM_2.1, LMM_2.2) #random slopes needed
#Reduce mean part:
# Switch to ML: likelihood-ratio comparisons of models with different fixed
# effects require maximum-likelihood fits.
LMM_2.1 <- update(LMM_2.1, method = "ML")
LMM_2.3 <- lme(CAPS_score~Time*geslacht*binarydepIN*Age, random=~Time|CIN, data=long_data_2,
               na.action=na.exclude, method = "ML")
anova(LMM_2.1, LMM_2.3) # no indication that bigger model with quadratic terms is needed
summary(LMM_2.3)$tTable
#remove interactions except for the one for the research question and important covariate interactions
#First removing 4 and 3 way interaction
LMM_2.4 <-lme(CAPS_score~ Time*factor(binarydepIN) + Time*geslacht+Time*Age, random=~Time|CIN,
              data=long_data_2, na.action=na.exclude, method = "ML")
anova(LMM_2.3, LMM_2.4) #Looks like 3- and 4-way interactions are not necessary
#-------------------------------------------------------------------------------
#Final model
#-------------------------------------------------------------------------------
# Refit the chosen model with REML for final (less biased) variance estimates.
LMM_2.4 <- update(LMM_2.4, method = "REML")
#-------------------------------------------------------------------------------
#Significance testing
#-------------------------------------------------------------------------------
# Each Lmat_* below is a contrast matrix whose rows pick out fixed-effect
# coefficients of LMM_2.4 (12 coefficients in total).  NOTE(review): the
# hard-coded positions must match the coefficient order shown by
# summary(LMM_2.4)$tTable -- re-check them if the model formula changes.
#Testing interaction age and time
Lmat_ageint <- rbind(c(rep(0,10),1,0), c(rep(0,11),1))
anova(LMM_2.4, L=Lmat_ageint)
#Testing interaction between sex and time
Lmat_sexint <- rbind(c(rep(0,8),1,0,0,0), c(rep(0,9),1,0,0))
anova(LMM_2.4, L=Lmat_sexint)
#Testing interaction between group (MDD or non-MDD) and time
Lmat_treatint <- rbind(c(rep(0,6),1,0), c(rep(0,7),1))
anova(LMM_2.4, L = Lmat_treatint)
#Testing change over time
Lmat_time <- rbind(c(0,1, rep(0,10)), c(0,0,1, rep(0,9)))
anova(LMM_2.4, L = Lmat_time)
#Testing change from post-treatment to follow-up
Lmat_postFU <- rbind(c(0,1,-1,rep(0,9)))
anova(LMM_2.4, L = Lmat_postFU)
summary(LMM_2.4)$tTable
#-------------------------------------------------------------------------------
#Testing Model assumptions:
#-------------------------------------------------------------------------------
#Distribution of marginal residuals is approximately normal
qqnorm(residuals(LMM_2.4,level=0, type='normalized'))
#Distribution of conditional residuals is approximately normal
qqnorm(residuals(LMM_2.4,level=1, type='normalized'))
#No clear pattern in residuals vs fitted values
plot(LMM_2.4$fitted[,2], residuals(LMM_2.4,level=1, type='normalized')[!is.na(residuals(LMM_2.4,level=0, type='response'))])
#No clear heterogeneity of variance over the time levels
plot(resid(LMM_2.4, type = "n") ~ long_data_2$Time,
     type = c("p", "smooth"), lwd = 3)
#-------------------------------------------------------------------------------
#Plotting the results
#-------------------------------------------------------------------------------
# Population-level (level = 0) predictions at centred covariates
# (geslacht = 0, Age = 0), one line per depression group.
newdatQ2 <- expand.grid(Time = unique(long_data_2$Time), binarydepIN = c(0,1),
                        geslacht=0, Age= 0)
newdatQ2$binarydepIN <- factor(newdatQ2$binarydepIN)
predicted_pop_means <- predict(LMM_2.4, level = 0, newdata=newdatQ2)
predicted_means_df <- data.frame(Time= rep(c("Intake", "Post-treatment", "6 months follow-up"),2),CAPS_score = predicted_pop_means,
                                 binarydepIN = factor(c(0,0,0,1,1,1)))
predicted_means_df$Time <- factor(predicted_means_df$Time, levels = c("Intake", "Post-treatment", "6 months follow-up"))
ggplot(predicted_means_df, aes(x=Time, y =CAPS_score, colour = binarydepIN)) +
  geom_line(aes(group =binarydepIN), size=3) +
  scale_colour_manual(name = "Comorbid depression", labels= c("no", "yes"), values = c("#00BFC4","#F8766D")) +ylab("total CAPS score")
| /LMM_2.R | no_license | eahoogendoorn/Statistical-Consulting-Open | R | false | false | 6,527 | r | #-------------------------------------------------------------------------------
#Fits a Linear Mixed Model with as outcome PTSD severity as measured by the CAPS
#-------------------------------------------------------------------------------
#Data is read and libraries are loaded in Creating_data.R
source("Creating_data.R")
data<-create_data()
attach(data)
#Centering age and Sex
# Effect-code sex (geslacht): 1 -> -0.5, otherwise 0.5.  A vectorised ifelse()
# replaces the original row-by-row loop (same result; unlike the scalar if(),
# it also tolerates NA rows instead of erroring).
data$geslacht <- ifelse(data$geslacht == 1, -0.5, 0.5)
# Centre age (no rescaling) so model intercepts refer to the mean age.
data$Age <- scale(data$Age, center = TRUE, scale = FALSE)
#-------------------------------------------------------------------------------
#Uncomment to test whether outliers have effect on results
#-------------------------------------------------------------------------------
#To check whether outliers have an effect on our results. They don't :)
# upper<-quantile(data$tijd_IN_TK, na.rm = TRUE, probs=c(.25, .75))[2]+
# 1.5*IQR(data$tijd_IN_TK, na.rm = TRUE)
# lower<-quantile(data$tijd_IN_TK, na.rm = TRUE, probs=c(.25, .75))[1]-
# 1.5*IQR(data$tijd_IN_TK, na.rm = TRUE)
# ouliers_IN_TK<-c(which(data$tijd_IN_TK>upper),which(data$tijd_IN_TK<lower))
# upper<-quantile(data$tijd_TK_FU, na.rm = TRUE, probs=c(.25, .75))[2]+
# 1.5*IQR(data$tijd_TK_FU, na.rm = TRUE)
# lower<-quantile(data$tijd_TK_FU, na.rm = TRUE, probs=c(.25, .75))[1]-
# 1.5*IQR(data$tijd_TK_FU, na.rm = TRUE)
# ouliers_TK_FU<-c(which(data$tijd_TK_FU>upper),which(data$tijd_TK_FU<lower))
# all_outliers<-c(ouliers_IN_TK, ouliers_TK_FU)
# data<-data[-all_outliers,]
#-------------------------------------------------------------------------------
#Creating long dataset
#-------------------------------------------------------------------------------
# Reshape to long format: one row per (subject, time point); the three CAPS
# total-score columns are stacked into Time / CAPS_score.
long_data_2 <- pivot_longer(data = data,
                            cols = c("totaalscore_KIP_IN", "totaalscore_KIP_TK", "totaalscore_KIP_FU"),
                            names_to = "Time", values_to = "CAPS_score")
# Recode the source-column names into readable time-point labels ...
long_data_2$Time[long_data_2$Time == "totaalscore_KIP_IN"] <- "Intake"
long_data_2$Time[long_data_2$Time == "totaalscore_KIP_TK"] <- "Post-treatment"
long_data_2$Time[long_data_2$Time == "totaalscore_KIP_FU"] <- "6-months FU"
# ... and order the factor so Intake is the reference level.
long_data_2$Time <- factor(long_data_2$Time, levels = c("Intake", "Post-treatment", "6-months FU"))
# Comorbid-depression indicator as a factor (levels 0/1).
long_data_2$binarydepIN <- factor(long_data_2$binarydepIN)
#-------------------------------------------------------------------------------
#Choosing model
#-------------------------------------------------------------------------------
#Elaborate mean model to determine random effects structure:
# REML fits are appropriate here: the candidate models share the fixed part
# and differ only in the random-effects structure.
LMM_2.1<-lme(CAPS_score~Time*geslacht*binarydepIN*(I(Age) +I(Age^2)), random=~Time|CIN,
             data=long_data_2, na.action=na.exclude, method = "REML")
summary(LMM_2.1)$tTable
LMM_2.2 <- lme(CAPS_score~Time*geslacht*binarydepIN*(I(Age) +I(Age^2)), random=~1|CIN,
               data=long_data_2, na.action=na.exclude, method = "REML")
anova(LMM_2.1, LMM_2.2) #random slopes needed
#Reduce mean part:
# Switch to ML: likelihood-ratio comparisons of models with different fixed
# effects require maximum-likelihood fits.
LMM_2.1 <- update(LMM_2.1, method = "ML")
LMM_2.3 <- lme(CAPS_score~Time*geslacht*binarydepIN*Age, random=~Time|CIN, data=long_data_2,
               na.action=na.exclude, method = "ML")
anova(LMM_2.1, LMM_2.3) # no indication that bigger model with quadratic terms is needed
summary(LMM_2.3)$tTable
#remove interactions except for the one for the research question and important covariate interactions
#First removing 4 and 3 way interaction
LMM_2.4 <-lme(CAPS_score~ Time*factor(binarydepIN) + Time*geslacht+Time*Age, random=~Time|CIN,
              data=long_data_2, na.action=na.exclude, method = "ML")
anova(LMM_2.3, LMM_2.4) #Looks like 3- and 4-way interactions are not necessary
#-------------------------------------------------------------------------------
#Final model
#-------------------------------------------------------------------------------
# Refit the chosen model with REML for final (less biased) variance estimates.
LMM_2.4 <- update(LMM_2.4, method = "REML")
#-------------------------------------------------------------------------------
#Significance testing
#-------------------------------------------------------------------------------
# Each Lmat_* below is a contrast matrix whose rows pick out fixed-effect
# coefficients of LMM_2.4 (12 coefficients in total).  NOTE(review): the
# hard-coded positions must match the coefficient order shown by
# summary(LMM_2.4)$tTable -- re-check them if the model formula changes.
#Testing interaction age and time
Lmat_ageint <- rbind(c(rep(0,10),1,0), c(rep(0,11),1))
anova(LMM_2.4, L=Lmat_ageint)
#Testing interaction between sex and time
Lmat_sexint <- rbind(c(rep(0,8),1,0,0,0), c(rep(0,9),1,0,0))
anova(LMM_2.4, L=Lmat_sexint)
#Testing interaction between group (MDD or non-MDD) and time
Lmat_treatint <- rbind(c(rep(0,6),1,0), c(rep(0,7),1))
anova(LMM_2.4, L = Lmat_treatint)
#Testing change over time
Lmat_time <- rbind(c(0,1, rep(0,10)), c(0,0,1, rep(0,9)))
anova(LMM_2.4, L = Lmat_time)
#Testing change from post-treatment to follow-up
Lmat_postFU <- rbind(c(0,1,-1,rep(0,9)))
anova(LMM_2.4, L = Lmat_postFU)
summary(LMM_2.4)$tTable
#-------------------------------------------------------------------------------
#Testing Model assumptions:
#-------------------------------------------------------------------------------
#Distribution of marginal residuals is approximately normal
qqnorm(residuals(LMM_2.4,level=0, type='normalized'))
#Distribution of conditional residuals is approximately normal
qqnorm(residuals(LMM_2.4,level=1, type='normalized'))
#No clear pattern in residuals vs fitted values
plot(LMM_2.4$fitted[,2], residuals(LMM_2.4,level=1, type='normalized')[!is.na(residuals(LMM_2.4,level=0, type='response'))])
#No clear heterogeneity of variance over the time levels
plot(resid(LMM_2.4, type = "n") ~ long_data_2$Time,
     type = c("p", "smooth"), lwd = 3)
#-------------------------------------------------------------------------------
#Plotting the results
#-------------------------------------------------------------------------------
# Population-level (level = 0) predictions at centred covariates
# (geslacht = 0, Age = 0), one line per depression group.
newdatQ2 <- expand.grid(Time = unique(long_data_2$Time), binarydepIN = c(0,1),
                        geslacht=0, Age= 0)
newdatQ2$binarydepIN <- factor(newdatQ2$binarydepIN)
predicted_pop_means <- predict(LMM_2.4, level = 0, newdata=newdatQ2)
predicted_means_df <- data.frame(Time= rep(c("Intake", "Post-treatment", "6 months follow-up"),2),CAPS_score = predicted_pop_means,
                                 binarydepIN = factor(c(0,0,0,1,1,1)))
predicted_means_df$Time <- factor(predicted_means_df$Time, levels = c("Intake", "Post-treatment", "6 months follow-up"))
ggplot(predicted_means_df, aes(x=Time, y =CAPS_score, colour = binarydepIN)) +
  geom_line(aes(group =binarydepIN), size=3) +
  scale_colour_manual(name = "Comorbid depression", labels= c("no", "yes"), values = c("#00BFC4","#F8766D")) +ylab("total CAPS score")
|
context("vascan_search")
# Integration test: every vascan_search() call below hits the live VASCAN web
# service, so the results depend on network access and the remote data.
# skip_on_cran() keeps it off CRAN; NOTE(review): consider also skipping when
# offline.
test_that("vascan_search returns the correct class", {
  skip_on_cran()
  # single query string -> parsed list
  aa <- vascan_search(q = "Helianthus annuus")
  # several query strings with raw=TRUE -> unparsed character response
  bb <- vascan_search(q = c("Helianthus annuus", "Crataegus dodgei"), raw=TRUE)
  # a random batch of 50 species names -> parsed list
  splist <- names_list(rank='species', size=50)
  cc <- vascan_search(q = splist)
  # NOTE(review): expect_is() is deprecated in testthat 3e;
  # expect_s3_class()/expect_type() are the modern replacements.
  expect_is(aa, "list")
  expect_is(bb, "character")
  expect_is(cc, "list")
  # The first match carries a 7-column taxonomic-assertions data.frame.
  expect_is(aa[[1]]$matches[[1]]$taxonomicassertions, "data.frame")
  expect_equal(NCOL(aa[[1]]$matches[[1]]$taxonomicassertions), 7)
})
| /tests/testthat/test-vascan_search.r | permissive | fozy81/taxize | R | false | false | 524 | r | context("vascan_search")
# Integration test: every vascan_search() call below hits the live VASCAN web
# service, so the results depend on network access and the remote data.
# skip_on_cran() keeps it off CRAN; NOTE(review): consider also skipping when
# offline.
test_that("vascan_search returns the correct class", {
  skip_on_cran()
  # single query string -> parsed list
  aa <- vascan_search(q = "Helianthus annuus")
  # several query strings with raw=TRUE -> unparsed character response
  bb <- vascan_search(q = c("Helianthus annuus", "Crataegus dodgei"), raw=TRUE)
  # a random batch of 50 species names -> parsed list
  splist <- names_list(rank='species', size=50)
  cc <- vascan_search(q = splist)
  # NOTE(review): expect_is() is deprecated in testthat 3e;
  # expect_s3_class()/expect_type() are the modern replacements.
  expect_is(aa, "list")
  expect_is(bb, "character")
  expect_is(cc, "list")
  # The first match carries a 7-column taxonomic-assertions data.frame.
  expect_is(aa[[1]]$matches[[1]]$taxonomicassertions, "data.frame")
  expect_equal(NCOL(aa[[1]]$matches[[1]]$taxonomicassertions), 7)
})
|
#############################################################################
# Upstream firms take it or leave it offers
#############################################################################
# #col i is d(output of main)/dw_i
# jacobian(func = d_main, x = c(0.5,0.5,0.5,0.5))
# Shared helper: downstream equilibrium for a pair of wholesale-offer vectors.
# w_A = (w_1A, w_2A) are firm A's offers to downstream firms 1 and 2; w_B
# likewise for firm B.  d_main() (defined elsewhere in this project) returns
# the four downstream prices followed by the four quantities
# (q_1A, q_1B, q_2A, q_2B).
upstream_pq <- function(w_A, w_B) {
  d_pq <- d_main(c(w_A[1], w_B[1], w_A[2], w_B[2]))
  list(p = d_pq[1:4], q = d_pq[5:8])
}

# Profit of upstream firm A at offers (w_A, w_B), followed by the downstream
# prices and quantities.  Returns c(pi_A, p, q) -- a length-9 numeric vector,
# as in the original.  (The duplicated body shared with u_firm_b now lives in
# upstream_pq().)
u_firm_a <- function(w_A, w_B){
  pq <- upstream_pq(w_A, w_B)
  # A sells q_1A to firm 1 at w_1A and q_2A to firm 2 at w_2A.
  pi_A <- w_A[1] * pq$q[1] + w_A[2] * pq$q[3]
  return(c(pi_A, pq$p, pq$q))
}

# Profit of upstream firm B at offers (w_A, w_B), followed by the downstream
# prices and quantities.  Returns c(pi_B, p, q).
u_firm_b <- function(w_A, w_B){
  pq <- upstream_pq(w_A, w_B)
  # B sells q_1B to firm 1 at w_1B and q_2B to firm 2 at w_2B.
  pi_B <- w_B[1] * pq$q[2] + w_B[2] * pq$q[4]
  return(c(pi_B, pq$p, pq$q))
}
#################################################### FORECLOSED ##############################################
| /R Code/3-23/Attempt with explicit FOCs/upstream_firms_test.R | no_license | YilunL/Sp-19-Indep-Study | R | false | false | 1,468 | r | #############################################################################
# Upstream firms take it or leave it offers
#############################################################################
# #col i is d(output of main)/dw_i
# jacobian(func = d_main, x = c(0.5,0.5,0.5,0.5))
# Shared helper: downstream equilibrium for a pair of wholesale-offer vectors.
# w_A = (w_1A, w_2A) are firm A's offers to downstream firms 1 and 2; w_B
# likewise for firm B.  d_main() (defined elsewhere in this project) returns
# the four downstream prices followed by the four quantities
# (q_1A, q_1B, q_2A, q_2B).
upstream_pq <- function(w_A, w_B) {
  d_pq <- d_main(c(w_A[1], w_B[1], w_A[2], w_B[2]))
  list(p = d_pq[1:4], q = d_pq[5:8])
}

# Profit of upstream firm A at offers (w_A, w_B), followed by the downstream
# prices and quantities.  Returns c(pi_A, p, q) -- a length-9 numeric vector,
# as in the original.  (The duplicated body shared with u_firm_b now lives in
# upstream_pq().)
u_firm_a <- function(w_A, w_B){
  pq <- upstream_pq(w_A, w_B)
  # A sells q_1A to firm 1 at w_1A and q_2A to firm 2 at w_2A.
  pi_A <- w_A[1] * pq$q[1] + w_A[2] * pq$q[3]
  return(c(pi_A, pq$p, pq$q))
}

# Profit of upstream firm B at offers (w_A, w_B), followed by the downstream
# prices and quantities.  Returns c(pi_B, p, q).
u_firm_b <- function(w_A, w_B){
  pq <- upstream_pq(w_A, w_B)
  # B sells q_1B to firm 1 at w_1B and q_2B to firm 2 at w_2B.
  pi_B <- w_B[1] * pq$q[2] + w_B[2] * pq$q[4]
  return(c(pi_B, pq$p, pq$q))
}
#################################################### FORECLOSED ##############################################
|
\name{NEWS}
\title{News for Package "RSpectra"}
\section{Changes in RSpectra version 0.16-1}{
\subsection{BUG FIXES}{
\itemize{
\item Fixed BLAS/LAPACK calls, thanks to Prof. Ripley.
}
}
}
\section{Changes in RSpectra version 0.16-0}{
\subsection{NEW FEATURES}{
\itemize{
\item \code{svds()} now supports implicit centering and scaling of the matrix
via the \code{center} and \code{scale} parameters in the \code{opts}
argument, suggested by \href{https://github.com/robmaz}{@robmaz}
(\href{https://github.com/yixuan/spectra/issues/73}{#73} of Spectra).
}
}
}
\section{Changes in RSpectra version 0.15-0}{
\subsection{NEW FEATURES}{
\itemize{
\item Updated Spectra to v0.8.1.
\item Added support for new matrix types \strong{dsCMatrix} and
\strong{dsRMatrix} to handle sparse and symmetric matrices,
contributed by \href{https://github.com/flying-sheep}{@flying-sheep}
(\href{https://github.com/yixuan/RSpectra/pull/16}{#16}).
\item \code{eigs()} now detects the symmetry of \strong{dgRMatrix} matrices.
}
}
\subsection{BUG FIXES}{
\itemize{
\item Improved the documentation about the relationship between SVD
and eigen decomposition for symmetric matrices, thanks to
\href{https://github.com/alexpghayes}{@alexpghayes}
(\href{https://github.com/yixuan/RSpectra/pull/17}{#17}).
\item (Internal) Replaced the deprecated \code{Eigen::MappedSparseMatrix}
class in the C++ code.
}
}
}
\section{Changes in RSpectra version 0.14-0}{
\subsection{NEW FEATURES}{
\itemize{
\item Updated Spectra to v0.8.0.
\item New parameter \code{opts$initvec} in \code{eigs()} and \code{eigs_sym()}
to allow users supplying the initial vector for the algorithm.
}
}
}
\section{Changes in RSpectra version 0.13-1}{
\subsection{BUG FIXES}{
\itemize{
\item Updated Spectra to v0.6.2 that fixes regressions in v0.6.1
on some edge cases.
}
}
}
\section{Changes in RSpectra version 0.13-0}{
\subsection{NEW FEATURES}{
\itemize{
\item Using \pkg{prettydoc} to format vignette.
\item Updated Spectra to v0.6.1 that improves numerical accuracy of
eigen-solvers.
}
}
\subsection{BUG FIXES}{
\itemize{
\item Registered native routines per CRAN's policy.
}
}
}
\section{Changes in RSpectra version 0.12-0}{
\subsection{NEW FEATURES}{
\itemize{
\item Now \code{svds()} supports user-defined implicit matrix that is
specified by two functions, \code{A} and \code{Atrans}, which
calculate the matrix multiplication and transpose multiplication
respectively.
\item Added a package vignette.
}
}
}
\section{Changes in RSpectra version 0.11-0}{
\subsection{NEW FEATURES}{
\itemize{
\item New package to supersede \pkg{rARPACK} to avoid name confusion.
\item Imported from \pkg{rARPACK} 0.10-0.
}
}
\subsection{BUG FIXES}{
\itemize{
\item Improved numerical stability.
\item Fixed convergence failure for matrices that have repeated
eigenvalues.
}
}
}
\section{Changes in rARPACK version 0.10-0}{
\subsection{BUG FIXES}{
\itemize{
\item Updated the backend Spectra library, which fixed the compatibility
with Eigen >= 3.2.6.
}
}
}
\section{Changes in rARPACK version 0.9-0}{
\subsection{BUG FIXES}{
\itemize{
\item Fixed a bug that causes the algorithm not converging on some matrices.
}
}
}
\section{Changes in rARPACK version 0.8-1}{
\subsection{BUG FIXES}{
\itemize{
\item Fixed a compilation problem on Solaris.
}
}
}
\section{Changes in rARPACK version 0.8-0}{
\subsection{NEW FEATURES}{
\itemize{
\item The backend program is now changed from ARPACK to
\href{https://github.com/yixuan/spectra}{Spectra},
which brings cleaner code and better performance.
\item \code{eigs_sym()} now accepts more matrix types.
\item Added a C interface for other packages to link to.
}
}
\subsection{BUG FIXES}{
\itemize{
\item Fixed a bug reported by xshi19
(\href{https://github.com/yixuan/rARPACK/issues/8}{#8}).
\item Fixed a performance issue reported by swajnautcz
(\href{https://github.com/yixuan/rARPACK/issues/10}{#10}).
}
}
}
\section{Changes in rARPACK version 0.7-0}{
\subsection{NEW FEATURES}{
\itemize{
\item Support for implicit matrix, contributed by Jiali Mei.
User can supply a function \code{FUN} rather than an explicit
matrix to \code{eigs()}, and the eigenvalues/eigenvectors of this
operator will be computed. \code{FUN(x, args)} must return a vector
of the same length as \code{x}.
\item \code{eigs()} will test the symmetry of matrix before actual
computation, since symmetric matrices can guarantee real
eigenvalues and eigenvectors, and the numerical result is more
stable.
}
}
\subsection{BUG FIXES}{
\itemize{
\item C++ code of \code{svds()} is completely rewritten. Now it is more
readable and easier to maintain.
\item Fix a bug possibly coming from ARPACK, which sometimes gives
incorrect result of complex eigenvectors.
\item Avoid using a C random number generator.
}
}
}
\section{Changes in rARPACK version 0.6-0}{
\subsection{NEW FEATURES}{
\itemize{
\item Add support for new matrix types: \strong{dgeMatrix} and
\strong{dgRMatrix}.
\item \code{eigs()} now allows a full Eigen Decomposition, meaning that
all the eigenvalues are calculated. In this case \code{eigs()} is
simply a wrapper of \code{eigen()}, and with a warning issued.
\item Ditto for \code{svds()}.
}
}
\subsection{BUG FIXES}{
\itemize{
\item Rewrite C++ code using classes and templates.
\item Fix errors in checking the values of \code{k} and \code{ncv}.
}
}
}
\section{Changes in rARPACK version 0.5-0}{
\subsection{NEW FEATURES}{
\itemize{
\item Add \code{svds()} function to calculate truncated SVD.
}
}
\subsection{BUG FIXES}{
\itemize{
\item Now sort eigenvalues in decreasing order.
\item Rename \code{eigs.sym()} to \code{eigs_sym()} to avoid confusion.
\item Fix a matrix out-of-bound error.
}
}
}
\section{Changes in rARPACK version 0.4-0}{
\subsection{NEW FEATURES}{
\itemize{
\item Implement shift-and-invert mode for all supported eigen problems.
}
}
\subsection{BUG FIXES}{
\itemize{
\item Update arpack-ng to 3.1.4.
}
}
}
\section{Changes in rARPACK version 0.3-0}{
\subsection{NEW FEATURES}{
\itemize{
\item Now \code{eigs()} supports real symmetric matrices.
}
}
}
\section{Changes in rARPACK version 0.2-0}{
\subsection{NEW FEATURES}{
\itemize{
\item Now \code{eigs()} supports sparse real nonsymmetric matrices of the
class \strong{dgCMatrix}, defined in the \pkg{Matrix} package.
}
}
}
\section{Changes in rARPACK version 0.1-0}{
\subsection{NEW FEATURES}{
\itemize{
\item Initial version. For now \code{eigs()} supports dense real
nonsymmetric matrices.
}
}
}
| /inst/NEWS.Rd | no_license | yixuan/RSpectra | R | false | false | 7,486 | rd | \name{NEWS}
\title{News for Package "RSpectra"}
\section{Changes in RSpectra version 0.16-1}{
\subsection{BUG FIXES}{
\itemize{
\item Fixed BLAS/LAPACK calls, thanks to Prof. Ripley.
}
}
}
\section{Changes in RSpectra version 0.16-0}{
\subsection{NEW FEATURES}{
\itemize{
\item \code{svds()} now supports implicit centering and scaling of the matrix
via the \code{center} and \code{scale} parameters in the \code{opts}
argument, suggested by \href{https://github.com/robmaz}{@robmaz}
(\href{https://github.com/yixuan/spectra/issues/73}{#73} of Spectra).
}
}
}
\section{Changes in RSpectra version 0.15-0}{
\subsection{NEW FEATURES}{
\itemize{
\item Updated Spectra to v0.8.1.
\item Added support for new matrix types \strong{dsCMatrix} and
\strong{dsRMatrix} to handle sparse and symmetric matrices,
contributed by \href{https://github.com/flying-sheep}{@flying-sheep}
(\href{https://github.com/yixuan/RSpectra/pull/16}{#16}).
\item \code{eigs()} now detects the symmetry of \strong{dgRMatrix} matrices.
}
}
\subsection{BUG FIXES}{
\itemize{
\item Improved the documentation about the relationship between SVD
and eigen decomposition for symmetric matrices, thanks to
\href{https://github.com/alexpghayes}{@alexpghayes}
(\href{https://github.com/yixuan/RSpectra/pull/17}{#17}).
\item (Internal) Replaced the deprecated \code{Eigen::MappedSparseMatrix}
class in the C++ code.
}
}
}
\section{Changes in RSpectra version 0.14-0}{
\subsection{NEW FEATURES}{
\itemize{
\item Updated Spectra to v0.8.0.
\item New parameter \code{opts$initvec} in \code{eigs()} and \code{eigs_sym()}
to allow users supplying the initial vector for the algorithm.
}
}
}
\section{Changes in RSpectra version 0.13-1}{
\subsection{BUG FIXES}{
\itemize{
\item Updated Spectra to v0.6.2 that fixes regressions in v0.6.1
on some edge cases.
}
}
}
\section{Changes in RSpectra version 0.13-0}{
\subsection{NEW FEATURES}{
\itemize{
\item Using \pkg{prettydoc} to format vignette.
\item Updated Spectra to v0.6.1 that improves numerical accuracy of
eigen-solvers.
}
}
\subsection{BUG FIXES}{
\itemize{
\item Registered native routines per CRAN's policy.
}
}
}
\section{Changes in RSpectra version 0.12-0}{
\subsection{NEW FEATURES}{
\itemize{
\item Now \code{svds()} supports user-defined implicit matrix that is
specified by two functions, \code{A} and \code{Atrans}, which
calculate the matrix multiplication and transpose multiplication
respectively.
\item Added a package vignette.
}
}
}
\section{Changes in RSpectra version 0.11-0}{
\subsection{NEW FEATURES}{
\itemize{
\item New package to supersede \pkg{rARPACK} to avoid name confusion.
\item Imported from \pkg{rARPACK} 0.10-0.
}
}
\subsection{BUG FIXES}{
\itemize{
\item Improved numerical stability.
\item Fixed convergence failure for matrices that have repeated
eigenvalues.
}
}
}
\section{Changes in rARPACK version 0.10-0}{
\subsection{BUG FIXES}{
\itemize{
\item Updated the backend Spectra library, which fixed the compatibility
with Eigen >= 3.2.6.
}
}
}
\section{Changes in rARPACK version 0.9-0}{
\subsection{BUG FIXES}{
\itemize{
\item Fixed a bug that causes the algorithm not converging on some matrices.
}
}
}
\section{Changes in rARPACK version 0.8-1}{
\subsection{BUG FIXES}{
\itemize{
\item Fixed a compilation problem on Solaris.
}
}
}
\section{Changes in rARPACK version 0.8-0}{
\subsection{NEW FEATURES}{
\itemize{
\item The backend program is now changed from ARPACK to
\href{https://github.com/yixuan/spectra}{Spectra},
which brings cleaner code and better performance.
\item \code{eigs_sym()} now accepts more matrix types.
\item Added a C interface for other packages to link to.
}
}
\subsection{BUG FIXES}{
\itemize{
\item Fixed a bug reported by xshi19
(\href{https://github.com/yixuan/rARPACK/issues/8}{#8}).
\item Fixed a performance issue reported by swajnautcz
(\href{https://github.com/yixuan/rARPACK/issues/10}{#10}).
}
}
}
\section{Changes in rARPACK version 0.7-0}{
\subsection{NEW FEATURES}{
\itemize{
\item Support for implicit matrix, contributed by Jiali Mei.
User can supply a function \code{FUN} rather than an explicit
matrix to \code{eigs()}, and the eigenvalues/eigenvectors of this
operator will be computed. \code{FUN(x, args)} must return a vector
of the same length as \code{x}.
\item \code{eigs()} will test the symmetry of matrix before actual
computation, since symmetric matrices can guarantee real
eigenvalues and eigenvectors, and the numerical result is more
stable.
}
}
\subsection{BUG FIXES}{
\itemize{
\item C++ code of \code{svds()} is completely rewritten. Now it is more
readable and easier to maintain.
\item Fix a bug possibly coming from ARPACK, which sometimes gives
incorrect result of complex eigenvectors.
\item Avoid using a C random number generator.
}
}
}
\section{Changes in rARPACK version 0.6-0}{
\subsection{NEW FEATURES}{
\itemize{
\item Add support for new matrix types: \strong{dgeMatrix} and
\strong{dgRMatrix}.
\item \code{eigs()} now allows a full Eigen Decomposition, meaning that
all the eigenvalues are calculated. In this case \code{eigs()} is
simply a wrapper of \code{eigen()}, and with a warning issued.
\item Ditto for \code{svds()}.
}
}
\subsection{BUG FIXES}{
\itemize{
\item Rewrite C++ code using classes and templates.
\item Fix errors in checking the values of \code{k} and \code{ncv}.
}
}
}
\section{Changes in rARPACK version 0.5-0}{
\subsection{NEW FEATURES}{
\itemize{
\item Add \code{svds()} function to calculate truncated SVD.
}
}
\subsection{BUG FIXES}{
\itemize{
\item Now sort eigenvalues in decreasing order.
\item Rename \code{eigs.sym()} to \code{eigs_sym()} to avoid confusion.
\item Fix a matrix out-of-bound error.
}
}
}
\section{Changes in rARPACK version 0.4-0}{
\subsection{NEW FEATURES}{
\itemize{
\item Implement shift-and-invert mode for all supported eigen problems.
}
}
\subsection{BUG FIXES}{
\itemize{
\item Update arpack-ng to 3.1.4.
}
}
}
\section{Changes in rARPACK version 0.3-0}{
\subsection{NEW FEATURES}{
\itemize{
\item Now \code{eigs()} supports real symmetric matrices.
}
}
}
\section{Changes in rARPACK version 0.2-0}{
\subsection{NEW FEATURES}{
\itemize{
\item Now \code{eigs()} supports sparse real nonsymmetric matrices of the
class \strong{dgCMatrix}, defined in the \pkg{Matrix} package.
}
}
}
\section{Changes in rARPACK version 0.1-0}{
\subsection{NEW FEATURES}{
\itemize{
\item Initial version. For now \code{eigs()} supports dense real
nonsymmetric matrices.
}
}
}
|
#Load Packages
library(tidyverse)
library(devtools)
library(lubridate)
## Load the data from Github
## hypso: hypsometry (volume by depth); winter: under-ice oxygen profiles;
## ice: ice-on/ice-off dates for several lakes (subset to lake "TB" below).
hypso <- read.csv('Data/tbhypo.csv', sep = ",")
winter <- read.csv('Data/winter.csv', sep = ",")
ice <- read.csv('Data/iceoniceoff.csv', sep = ",")
ice<- subset(ice, lakeid == "TB")
#Add a Year to Ice before Joining it to winter oxygen
#(so that a given winter's freeze-up date is matched to the following
# calendar year's under-ice samples)
ice$year <- ice$year + 1
#Join Hypsometry and Ice datasheets
winteroxy<- left_join(winter, hypso, by = "depth")
winteroxy<- left_join(winteroxy, ice, by = "year")
#Convert to Dates (input columns are month/day/year strings)
winteroxy$datefirstice = mdy(winteroxy$datefirstice)
winteroxy$sampledate = mdy(winteroxy$sampledate)
#Calculate Hypsometrically Weighted Oxygen rates (need help in matching Timothy's protocols)
#Do I do anything with depth 8m? How to compensate for Ice thickness and change in lake volume?
# Per-depth oxygen mass contribution: layer volume times concentration.
winteroxy<- winteroxy %>%
mutate("multipliedVol" = volume * o2)
# Volume-weighted whole-lake oxygen per sample date.
# NOTE(review): 61693.5 is presumably the total lake volume used to normalize
# the sum -- confirm against the hypsometry table. Also note the
# group_by(sampledate) grouping persists on winteroxy after this step.
winteroxy<- winteroxy %>%
group_by(sampledate) %>%
mutate("oxygenMass" = sum(multipliedVol/61693.5))
#Last Days since freeze up (difftime in days: sample date minus first-ice date)
winteroxy<- winteroxy %>%
mutate("lastdays" = (sampledate - datefirstice ))
#Tim's subset (years 1982-1991 inclusive)
thesis<- winteroxy%>%
filter(year >1981, year <1992)
#Average Mimicking Table 10b
#(summary statistics of whole-lake oxygen mass by lake)
wholelake<- thesis%>%
group_by(lakeid.x)%>%
summarise(Mean = mean(oxygenMass),
sd = sd(oxygenMass),
min = min(oxygenMass),
max = max(oxygenMass))
#Example 1982 (slightly different than Timothy's)
#Linear trend of oxygen mass over days since freeze-up for one winter.
year1982<- subset(winteroxy, year == 1982)
summary(lm(oxygenMass~lastdays, data =year1982))
| /Code/oxygen.R | no_license | algorsky/Oxygen | R | false | false | 1,520 | r | #Load Packages
library(tidyverse)
library(devtools)
library(lubridate)
## Load the data from Github
hypso <- read.csv('Data/tbhypo.csv', sep = ",")
winter <- read.csv('Data/winter.csv', sep = ",")
ice <- read.csv('Data/iceoniceoff.csv', sep = ",")
ice<- subset(ice, lakeid == "TB")
#Add a Year to Ice before Joining it to winter oxygen
ice$year <- ice$year + 1
#Join Hypsometry and Ice datasheets
winteroxy<- left_join(winter, hypso, by = "depth")
winteroxy<- left_join(winteroxy, ice, by = "year")
#Convert to Dates
winteroxy$datefirstice = mdy(winteroxy$datefirstice)
winteroxy$sampledate = mdy(winteroxy$sampledate)
#Calculate Hypsometrically Weighted Oxygen rates (need help in matching Timothy's protocols)
#Do I do anything with depth 8m? How to compensate for Ice thickness and change in lake volume?
winteroxy<- winteroxy %>%
mutate("multipliedVol" = volume * o2)
winteroxy<- winteroxy %>%
group_by(sampledate) %>%
mutate("oxygenMass" = sum(multipliedVol/61693.5))
#Last Days since freeze up
winteroxy<- winteroxy %>%
mutate("lastdays" = (sampledate - datefirstice ))
#Tim's subset
thesis<- winteroxy%>%
filter(year >1981, year <1992)
#Average Mimicking Table 10b
wholelake<- thesis%>%
group_by(lakeid.x)%>%
summarise(Mean = mean(oxygenMass),
sd = sd(oxygenMass),
min = min(oxygenMass),
max = max(oxygenMass))
#Example 1982 (slightly different than Timothy's)
year1982<- subset(winteroxy, year == 1982)
summary(lm(oxygenMass~lastdays, data =year1982))
|
#' setVerboseCatOption
#'
#' Allows user to toggle on and off printing messages on a per-function basis.
#' Should be usable in other packages, but not by importing.
#'
#' @aliases ifVerboseCat
#' @param fname Name of the function to control.
#' @param value Boolean value: should this function print out messages?
#' @return The new value of the namespace option for fname ifVerboseCat
#'
setVerboseCatOption = function(fname, value) {
  # Flags are stored in a single option named after this package's namespace;
  # the namespace is looked up from ifVerboseCat's enclosing environment.
  pkg <- getNamespaceName(environment(ifVerboseCat))
  all_opts <- options()
  # Record (or overwrite) the per-function flag inside that option value,
  # then write the whole option set back.
  all_opts[[pkg]][fname] <- value
  options(all_opts)
  # Return the updated per-package flag vector.
  all_opts[[pkg]]
}
#' Print a status line for the calling function, if its verbosity flag is on.
#'
#' Looks up the name of the calling function, lazily initializes its
#' per-function verbosity flag (defaulting to TRUE) in the package-level
#' option set, and prints `fname: ...` via catn() when the flag is TRUE.
#' Returns NULL invisibly.
ifVerboseCat = function(...){
  # Because of this line, you can copy this code into another package project
  # without change, but you can't import this function.
  namespaceName = getNamespaceName(environment(ifVerboseCat))
  ### Get name of calling function. (Bug fix: this must happen before the
  ### initialization branch below, which previously referenced 'fname'
  ### before it was defined and so errored on the very first call.)
  fname = try(as.character(parse(text=sys.call(-1)[1]))[1] )
  if (inherits(fname, "try-error")) return(invisible(NULL))
  ### Initialize the option set on first use. Seeding with TRUE (rather than
  ### logical(0), which cannot be assigned into a NULL vector) creates the
  ### named logical vector the lookups below rely on.
  if (is.null(options(namespaceName)[[1]])) {
    setVerboseCatOption(fname, TRUE)
  }
  ### If the flag is not yet set for this caller, default it to TRUE.
  if (is.na(options(namespaceName)[[1]][fname])) {
    setVerboseCatOption(fname, TRUE)
  }
  ### If TRUE, print out the requested line of text via catn()
  ### (defined elsewhere in this package).
  if (options(namespaceName)[[1]] [fname])
    catn(fname, ": ", ...)
  invisible(NULL)
}
| /R/ifVerboseCat.R | no_license | professorbeautiful/NNTbiomarkerHome | R | false | false | 1,440 | r | #' setVerboseCatOption
#'
#' Allows user to toggle on and off printing messages on a per-function basis.
#' Should be usable in other packages, but not by importing.
#'
#' @aliases ifVerboseCat
#' @param fname Name of the function to control.
#' @param value Boolean value: should this function print out messages?
#' @return The new value of the namespace option for fname ifVerboseCat
#'
setVerboseCatOption = function(fname, value) {
namespaceName = getNamespaceName(environment(ifVerboseCat))
opts = options()
opts[[namespaceName]] [fname] = value
options(opts)
opts[[namespaceName]]
}
ifVerboseCat = function(...){
# Because of this line, you can copy this code into another package project
# without change, but you can't import this function.
namespaceName = getNamespaceName(environment(ifVerboseCat))
optionName = paste0(namespaceName, ".ifverboseCat")
### Initialize the option set.
if(is.null(options(namespaceName)[[1]])) {
setVerboseCatOption(fname, logical(0))
}
### Get name of calling function.
fname = try(as.character(parse(text=sys.call(-1)[1]))[1] )
if(class(fname) == "try-error") return(invisible(NULL))
### If option not yet set, set it to TRUE.
if(is.na(options(namespaceName)[[1]][fname])) {
setVerboseCatOption(fname, TRUE)
}
### If TRUE, print out the requested line of text.
if(options(namespaceName)[[1]] [fname])
catn(fname, ": ", ...)
invisible(NULL)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aiplatform_objects.R
\name{GoogleCloudAiplatformV1Context}
\alias{GoogleCloudAiplatformV1Context}
\title{GoogleCloudAiplatformV1Context Object}
\usage{
GoogleCloudAiplatformV1Context(
GoogleCloudAiplatformV1Context.labels = NULL,
GoogleCloudAiplatformV1Context.metadata = NULL,
schemaTitle = NULL,
displayName = NULL,
etag = NULL,
labels = NULL,
schemaVersion = NULL,
metadata = NULL,
name = NULL,
description = NULL
)
}
\arguments{
\item{GoogleCloudAiplatformV1Context.labels}{The \link{GoogleCloudAiplatformV1Context.labels} object or list of objects}
\item{GoogleCloudAiplatformV1Context.metadata}{The \link{GoogleCloudAiplatformV1Context.metadata} object or list of objects}
\item{schemaTitle}{The title of the schema describing the metadata}
\item{displayName}{User provided display name of the Context}
\item{etag}{An eTag used to perform consistent read-modify-write updates}
\item{labels}{The labels with user-defined metadata to organize your Contexts}
\item{schemaVersion}{The version of the schema in schema_name to use}
\item{metadata}{Properties of the Context}
\item{name}{Immutable}
\item{description}{Description of the Context}
}
\value{
GoogleCloudAiplatformV1Context object
}
\description{
GoogleCloudAiplatformV1Context Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Instance of a general context.
}
\seealso{
Other GoogleCloudAiplatformV1Context functions:
\code{\link{GoogleCloudAiplatformV1Context.labels}()},
\code{\link{GoogleCloudAiplatformV1Context.metadata}()},
\code{\link{projects.locations.metadataStores.contexts.create}()},
\code{\link{projects.locations.metadataStores.contexts.patch}()}
}
\concept{GoogleCloudAiplatformV1Context functions}
| /googleaiplatformv1.auto/man/GoogleCloudAiplatformV1Context.Rd | no_license | justinjm/autoGoogleAPI | R | false | true | 1,823 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aiplatform_objects.R
\name{GoogleCloudAiplatformV1Context}
\alias{GoogleCloudAiplatformV1Context}
\title{GoogleCloudAiplatformV1Context Object}
\usage{
GoogleCloudAiplatformV1Context(
GoogleCloudAiplatformV1Context.labels = NULL,
GoogleCloudAiplatformV1Context.metadata = NULL,
schemaTitle = NULL,
displayName = NULL,
etag = NULL,
labels = NULL,
schemaVersion = NULL,
metadata = NULL,
name = NULL,
description = NULL
)
}
\arguments{
\item{GoogleCloudAiplatformV1Context.labels}{The \link{GoogleCloudAiplatformV1Context.labels} object or list of objects}
\item{GoogleCloudAiplatformV1Context.metadata}{The \link{GoogleCloudAiplatformV1Context.metadata} object or list of objects}
\item{schemaTitle}{The title of the schema describing the metadata}
\item{displayName}{User provided display name of the Context}
\item{etag}{An eTag used to perform consistent read-modify-write updates}
\item{labels}{The labels with user-defined metadata to organize your Contexts}
\item{schemaVersion}{The version of the schema in schema_name to use}
\item{metadata}{Properties of the Context}
\item{name}{Immutable}
\item{description}{Description of the Context}
}
\value{
GoogleCloudAiplatformV1Context object
}
\description{
GoogleCloudAiplatformV1Context Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Instance of a general context.
}
\seealso{
Other GoogleCloudAiplatformV1Context functions:
\code{\link{GoogleCloudAiplatformV1Context.labels}()},
\code{\link{GoogleCloudAiplatformV1Context.metadata}()},
\code{\link{projects.locations.metadataStores.contexts.create}()},
\code{\link{projects.locations.metadataStores.contexts.patch}()}
}
\concept{GoogleCloudAiplatformV1Context functions}
|
## Task: create one R script called run_analysis.R that does the following.
#####################################################################
## a) Merges the training and the test sets to create one data set.
#####################################################################
## a1) Check the data folder exists; otherwise exit with an error.
## (The "exits" wording in the runtime messages below is a typo but is
## left untouched, since those strings are program output.)
message("checking ./UCI HAR Dataset exits")
folder <- "./UCI HAR Dataset"
if (!file.exists(folder)) { stop("./UCI HAR Dataset does not exits!")}
## a2) Helper to load one split ("train"/"test") of the data (defined below).
## Read one split ("train" or "test") of the UCI HAR data set and return it
## as a single data frame: subject id, activity id (included twice: one copy
## kept as the id, one to be replaced by the activity name later), then the
## feature measurements. Relies on the global 'folder' set above.
loaddata <- function(name) {
  if (!(name %in% c("train", "test"))) {stop("no such folder!")}
  split_dir <- paste0(folder, "/", name)
  # All three files share the "<prefix>_<name>.txt" naming scheme.
  read_part <- function(prefix) {
    read.table(paste0(split_dir, "/", prefix, "_", name, ".txt"),
               colClasses = "numeric")
  }
  subject    <- read_part("subject")  # subject_<name>.txt
  feature    <- read_part("X")        # X_<name>.txt: the measurements
  activityid <- read_part("y")        # y_<name>.txt: activity labels
  # Same column layout as before; returned invisibly, matching the original
  # final-assignment return.
  invisible(data.frame(subject = subject, activityid = activityid,
                       activity = activityid, feature = feature))
}
## Verify the train subfolder exists before attempting any reads.
foldertrain <- paste(folder, "/train", sep="")
message("checking UCI HAR Dataset/train exists...")
if (!file.exists(foldertrain)) { stop("./UCI HAR Dataset/train does not exits!")}
## load training data
message("loading train data...")
traindata <- loaddata("train")
## load testing data
message("loading test data...")
testdata <- loaddata("test")
## Row-bind training and testing observations into one data set (step a).
traintestdata <- rbind(traindata, testdata)
################################################################################
## b) Extracts only the measurements on the mean and standard deviation for
## each measurement.
################################################################################
## load feature names (second column of features.txt, read in as a factor)
message("select mean and standard deviation for each measurement...")
featurename <- read.table(paste(folder, "/features.txt", sep=""))[[2]]
featurename <- as.character(levels(featurename))[featurename]
## subset featurename to find names contain "-mean()" or "-std()"
## NOTE(review): "[^F]" excludes "-meanFreq()"; the "()" in the pattern is an
## empty capture group, so it matches nothing extra -- effectively the pattern
## is "-mean[^F]|-std".
featureindex <- grepl("-mean[^F]()|-std()", featurename)
featurename <- featurename[featureindex]
## clean names
## remove parentheses
featurename <- gsub("\\(\\)", "", featurename)
## replace "-" by "."
featurename <- gsub("-", ".", featurename)
featurename <- gsub("Body", "body.", featurename)
## remove duplication substring "Body" in the names
## NOTE(review): the dots here are unescaped regex wildcards; this works on
## these inputs but matches any character, not only ".".
featurename <- gsub("body.body.", "body.", featurename)
## replace abbre by full name
## "t" -> "time"
# "f" -> "frequency"
## "Acc" -> acceleration
## "Mag" -> magnitude
## "Gyro" -> gyroscope
featurename <- gsub("Acc", "acceleration.", featurename)
featurename <- gsub("Mag", "magnitude.", featurename)
featurename <- gsub("Gyro", "gyroscope.", featurename)
featurename <- gsub("Jerk", "jerk.", featurename)
featurename <- gsub("^t", "time.", featurename)
featurename <- gsub("^f", "frequency.", featurename)
## remove duplicate "."
featurename <- gsub("\\.\\.", "\\.", featurename)
## to lower case
featurename <- tolower(featurename)
## subset features, only keep mean or std features
## (the three leading TRUEs keep the subject and two activity columns)
traintestdata <- traintestdata[, c(TRUE, TRUE, TRUE, featureindex)]
## add names to each col
names(traintestdata) <- c("subject", "activity.id", "activity.name", featurename)
################################################################################
## c) Uses descriptive activity names to name the activities in the data set
################################################################################
message("add activity names to data...")
## load activity names (second column of activity_labels.txt, read as factor)
activityname <- read.table(paste(folder, "/activity_labels.txt", sep=""))[[2]]
## turn factor into a character vector of its labels
## (bug fix: this assignment previously went to a misspelled variable,
## "actitityname", leaving the conversion a dead statement; the script only
## worked because gsub() below coerces factors to character itself)
activityname <- as.character(levels(activityname))[activityname]
## replace "_" by "." and turn character to lower case,
## e.g. "WALKING_UPSTAIRS" -> "walking.upstairs"
activityname <- tolower(gsub("_", ".", activityname))
## map each numeric activity id onto its descriptive label
traintestdata$activity.name <- activityname[traintestdata$activity.name]
################################################################################
## d) Appropriately labels the data set with descriptive activity names.
################################################################################
## this is done in step b)
################################################################################
## e) Creates a second, independent tidy data set with the average of each
## variable for each activity and each subject.
################################################################################
message("create tidydata set...")
## split traintestdata according to subject and activity.name columns,
## giving one data frame per (subject, activity) combination
splitdata <- split(traintestdata, list(traintestdata$subject,
traintestdata$activity.name))
## Average every measurement column of one (subject, activity) group,
## keeping the three identifier columns from the group's first row.
colmean <- function(data) {
datamean <- colMeans(data[, c(-1,-2,-3)])
res <- c(data[1,1:3], datamean)
as.data.frame(res)
}
## apply lapply to each element in splitdata
## use rbind to combine list to a dataframe
tidydata <- do.call(rbind, lapply(splitdata, colmean))
## add "average" to the names of measurements (identifier columns unchanged)
names(tidydata) <- c(names(tidydata)[1:3],
paste(names(tidydata)[c(-1,-2,-3)],"average", sep="."))
## store tidydata into file (tab-separated)
write.table(tidydata, file="tidydata.txt", sep="\t")
message("Data preprocessing done...")
#####################################################################
## a) Merges the training and the test sets to create one data set.
#####################################################################
## a1) check folder exists, otherwise exist with warning message
message("checking ./UCI HAR Dataset exits")
folder <- "./UCI HAR Dataset"
if (!file.exists(folder)) { stop("./UCI HAR Dataset does not exits!")}
## a2) load data frome training/testing data
loaddata <- function(name) {
if (!(name %in% c("train", "test"))) {stop("no such folder!")}
foldername <- paste(folder, "/", name, sep="")
## read subject
subject <- read.table(paste(foldername, "/", "subject_", name, ".txt",
sep=""), colClasses="numeric")
## read X
feature <- read.table(paste(foldername, "/", "X_", name, ".txt", sep=""),
colClasses="numeric")
## read Y
activityid <- read.table(paste(foldername, "/", "y_", name, ".txt", sep=""),
colClasses="numeric")
## return data frame
data <- data.frame(subject=subject, activityid=activityid,
activity=activityid, feature=feature)
}
foldertrain <- paste(folder, "/train", sep="")
message("checking UCI HAR Dataset/train exists...")
if (!file.exists(foldertrain)) { stop("./UCI HAR Dataset/train does not exits!")}
## load training data
message("loading train data...")
traindata <- loaddata("train")
## load testing data
message("loading test data...")
testdata <- loaddata("test")
## row combine training and testing data
traintestdata <- rbind(traindata, testdata)
################################################################################
## b) Extracts only the measurements on the mean and standard deviation for
## each measurement.
################################################################################
## load feature names
message("select mean and standard deviation for each measurement...")
featurename <- read.table(paste(folder, "/features.txt", sep=""))[[2]]
featurename <- as.character(levels(featurename))[featurename]
## subset featurename to find names contain "-mean()" or "-std()"
featureindex <- grepl("-mean[^F]()|-std()", featurename)
featurename <- featurename[featureindex]
## clean names
## remove parenthsis
featurename <- gsub("\\(\\)", "", featurename)
## replace "-" by "."
featurename <- gsub("-", ".", featurename)
featurename <- gsub("Body", "body.", featurename)
## remove duplication substring "Body" in the names
featurename <- gsub("body.body.", "body.", featurename)
## replace abbre by full name
## "t" -> "time"
# "f" -> "frequency"
## "Acc" -> acceleration
## "Mag" -> magnitude
## "Gyro" -> gyroscope
featurename <- gsub("Acc", "acceleration.", featurename)
featurename <- gsub("Mag", "magnitude.", featurename)
featurename <- gsub("Gyro", "gyroscope.", featurename)
featurename <- gsub("Jerk", "jerk.", featurename)
featurename <- gsub("^t", "time.", featurename)
featurename <- gsub("^f", "frequency.", featurename)
## remove duplicate "."
featurename <- gsub("\\.\\.", "\\.", featurename)
## to lower case
featurename <- tolower(featurename)
## subset features, only keep mean or std features
traintestdata <- traintestdata[, c(TRUE, TRUE, TRUE, featureindex)]
## add names to each col
names(traintestdata) <- c("subject", "activity.id", "activity.name", featurename)
################################################################################
## c) Uses descriptive activity names to name the activities in the data set
################################################################################
message("add activity names to data...")
## load activity names
activityname <- read.table(paste(folder, "/activity_labels.txt", sep=""))[[2]]
## turn factor to vector
actitityname <- as.character(levels(activityname))[activityname]
## replace "_" by "." and turn character to lower case
activityname <- tolower(gsub("_", ".", activityname))
## set activity name in traintestdata
traintestdata$activity.name <- activityname[traintestdata$activity.name]
################################################################################
## d) Appropriately labels the data set with descriptive activity names.
################################################################################
## this is done in step b)
################################################################################
## e) Creates a second, independent tidy data set with the average of each
## variable for each activity and each subject.
################################################################################
message("create tidydata set...")
## split traintestdata according to subject and activity.name columns
splitdata <- split(traintestdata, list(traintestdata$subject,
traintestdata$activity.name))
## function used to extract average for each measurements
colmean <- function(data) {
datamean <- colMeans(data[, c(-1,-2,-3)])
res <- c(data[1,1:3], datamean)
as.data.frame(res)
}
## apply lapply to each element in splitdata
## use rbind to combine list to a dataframe
tidydata <- do.call(rbind, lapply(splitdata, colmean))
## add "average" to the names of measurements
names(tidydata) <- c(names(tidydata)[1:3],
paste(names(tidydata)[c(-1,-2,-3)],"average", sep="."))
## store tidydata into file
write.table(tidydata, file="tidydata.txt", sep="\t")
message("Data preprocessing done...") |
#----HEADER-------------------------------------------------------------------------------------------------------------
# Purpose: PART ONE - Prepare negative binomial regression of diphtheria cases for input into codcorrect
# PART TWO - Format for CodCorrect and save results to database
# PART THREE - Run DisMod model for CFR
# PART FOUR - Calculate nonfatal outcomes from mortality
# Use mortality to calculate prevalence (prev = mort/cfr*duration)
# Calculate incidence from prevalence (inc = prev/duration)
# PART FIVE - Format for COMO and save results to database
#***********************************************************************************************************************
########################################################################################################################
##### START-UP #########################################################################################################
########################################################################################################################
#----CONFIG-------------------------------------------------------------------------------------------------------------
### load packages (FIX: data.table was listed twice in the original p_load call; deduplicated)
pacman::p_load(magrittr, foreign, stats, MASS, data.table, rhdf5, plyr, parallel, dplyr)
### mvtnorm lives in a custom library path on the cluster (Linux); use library()
### rather than require() so a missing package errors immediately instead of
### silently returning FALSE (lib was also a partial match for lib.loc)
if (Sys.info()["sysname"]=="Linux") library(mvtnorm, lib.loc="FILEPATH") else pacman::p_load(mvtnorm)
#***********************************************************************************************************************
#----DIRECTORIES--------------------------------------------------------------------------------------------------------
### set objects
# NOTE(review): "diptheria" is misspelled but used consistently as a path
# component and data filter throughout this pipeline -- do not "fix" without
# renaming the corresponding directories/cause labels
acause <- "diptheria"
# GBD age_group_ids: 4 = post-neonatal, 16 = 55-59; modeled age range
age_start <- 4
age_end <- 16
cause_id <- 338
me_id <- 1421
gbd_round <- 5
# GBD round 5 corresponds to 2017 (5 + 2012)
year_end <- gbd_round + 2012
### draw numbers: 1000 draws, indexed 0-999 per GBD convention
draw_nums_gbd <- 0:999
draw_cols_upload <- c("location_id", "year_id", "age_group_id", "sex_id", paste0("draw_", draw_nums_gbd))
### make folders on cluster
# NOTE(review): custom_version, j_root, CALCULATE_FATAL, CALCULATE_NONFATAL,
# description, male_CODEm_version and female_CODEm_version are assumed to be
# defined by a launcher script that sources this file -- confirm upstream
# CoD
cl.death.dir <- file.path("FILEPATH", acause, "mortality", custom_version, "draws")
if (!dir.exists(cl.death.dir)) dir.create(cl.death.dir, recursive=TRUE)
# nonfatal/epi
cl.version.dir <- file.path("FILEPATH", acause, "nonfatal", custom_version, "draws")
if (!dir.exists(cl.version.dir)) dir.create(cl.version.dir, recursive=TRUE)
### directories
home <- file.path(j_root, "FILEPATH", acause, "00_documentation")
j.version.dir <- file.path(home, "models", custom_version)
j.version.dir.inputs <- file.path(j.version.dir, "model_inputs")
j.version.dir.logs <- file.path(j.version.dir, "model_logs")
if (!dir.exists(j.version.dir.inputs)) dir.create(j.version.dir.inputs, recursive=TRUE)
if (!dir.exists(j.version.dir.logs)) dir.create(j.version.dir.logs, recursive=TRUE)
### save description of model run
# which model components are being uploaded?
if (CALCULATE_NONFATAL=="yes" & CALCULATE_FATAL=="no") add_ <- "NF"
if (CALCULATE_FATAL=="yes" & CALCULATE_NONFATAL=="no") add_ <- "CoD"
if (CALCULATE_NONFATAL=="yes" & CALCULATE_FATAL=="yes") add_ <- "NF and CoD"
# record CODEm data-rich feeder model versions used in CoD hybridization
if (CALCULATE_FATAL=="yes") description <- paste0("DR M ", male_CODEm_version, ", DR F ", female_CODEm_version, ", ", description)
# save full description to model folder (side effect: writes MODEL_DESCRIPTION.txt)
description <- paste0(add_, " - ", description)
cat(description, file=file.path(j.version.dir, "MODEL_DESCRIPTION.txt"))
#***********************************************************************************************************************
#----FUNCTIONS----------------------------------------------------------------------------------------------------------
### load custom functions (plain source() calls; equivalent to the piped
### `"path" %>% source` form but with no magrittr dependency)
source("FILEPATH/sql_query.R")
source("FILEPATH/read_hdf5_table.R")
### load shared central-comp functions from the J drive
source(file.path(j_root, "FILEPATH/get_population.R"))
source(file.path(j_root, "FILEPATH/get_location_metadata.R"))
source(file.path(j_root, "FILEPATH/get_covariate_estimates.R"))
source(file.path(j_root, "FILEPATH/get_envelope.R"))
source(file.path(j_root, "FILEPATH/get_draws.R"))
source(file.path(j_root, "FILEPATH/get_cod_data.R"))
#***********************************************************************************************************************
########################################################################################################################
##### PART ONE: COD NEG BIN REGRESSION #################################################################################
########################################################################################################################
#----PREP---------------------------------------------------------------------------------------------------------------
### get location data
locations <- get_location_metadata(gbd_round_id=gbd_round, location_set_id=22)[, ## id=22 is from covariates team, id=9 is from epi
.(location_id, ihme_loc_id, location_name, location_ascii_name, region_id, super_region_id, level, location_type, parent_id, super_region_name, sort_order)]
## level >= 3 keeps national and subnational locations (drops regions/super-regions)
pop_locs <- unique(locations[level >= 3, location_id])
### pop_env
# get all-cause mortality envelope (HIV-free), 1980 through year_end
envelope <- get_envelope(location_id=pop_locs, sex_id=1:2, age_group_id=c(age_start:age_end), with_hiv=0, year_id=1980:year_end)[,
.(location_id, year_id, age_group_id, sex_id, mean, run_id)]
setnames(envelope, "mean", "envelope")
# get population data for the same location/year/age/sex grid
population <- get_population(location_id=pop_locs, year_id=1980:year_end, age_group_id=c(age_start:age_end), sex_id=1:2)[,
.(location_id, year_id, age_group_id, sex_id, population, run_id)]
# log input model versions (appends to input_model_version_ids.txt)
cat(paste0("Mortality envelope - model run ", unique(envelope$run_id)),
file=file.path(j.version.dir.logs, "input_model_version_ids.txt"), sep="\n", append=TRUE)
cat(paste0("Population - model run ", unique(population$run_id)),
file=file.path(j.version.dir.logs, "input_model_version_ids.txt"), sep="\n", append=TRUE)
# inner join population and envelope on the demographic keys
pop_env <- merge(population, envelope, by=c("location_id", "year_id", "age_group_id", "sex_id"))
if (CALCULATE_FATAL == "yes") {
### covariates
# covariate: DTP3_coverage_prop, covariate_id=32 (3-dose DTP vaccine coverage)
covar <- get_covariate_estimates(covariate_id=32, year_id=1980:year_end, location_id=pop_locs, gbd_round_id=gbd_round)[,
.(location_id, year_id, mean_value, model_version_id)]
setnames(covar, "mean_value", "DTP3_coverage_prop")
### save covariate versions
cat(paste0("Covariate DTP3_coverage_prop (CoD) - model version ", unique(covar$model_version_id)),
file=file.path(j.version.dir.logs, "input_model_version_ids.txt"), sep="\n", append=TRUE)
### raw
# get raw CoD data; the acause filter string matches the (misspelled)
# "diptheria" label used by this pipeline -- NOTE(review): confirm the CoD
# database uses the same spelling
raw <- get_cod_data(cause_id=cause_id, gbd_round_id=gbd_round) %>% setnames(., "year", "year_id") %>% .[acause=="diptheria", .(nid, location_id, location_name, year_id, age_group_id, sex, cf_corr, sample_size, description)] %>% setnames(., "sex", "sex_id")
# keep usable rows: non-missing corrected cause fraction, post-1980, nonzero
# sample size, modeled age groups only
raw <- raw[!is.na(cf_corr) & year_id >= 1980 & sample_size != 0 & age_group_id %in% c(age_start:age_end), ]
# save model version
cat(paste0("CoD data - version ", unique(raw$description)),
file=file.path(j.version.dir.logs, "input_model_version_ids.txt"), sep="\n", append=TRUE)
# add on mortality envelope
raw <- merge(raw, pop_env, by=c("location_id", "year_id", "age_group_id", "sex_id"), all.x=TRUE)
# calculate death counts from cause fraction * envelope
raw[, deaths := cf_corr * envelope]
### round to whole number (counts!) -- values below 0.5 become 0
raw[deaths < 0.5, deaths := 0]
### inform model with national data only (level==3)
raw <- raw[location_id %in% locations[level==3, location_id], ]
### drop outliers (cf greater than the 99.9th percentile)
cf_999 <- quantile(raw$cf_corr, 0.999)
raw <- raw[cf_corr <= cf_999, ]
### make ages into factor levels so glm.nb fits one fixed effect per age group
raw[, age_group_id := as.factor(age_group_id)]
### bring together variables for regression
regress <- merge(raw, covar, by=c("location_id", "year_id"))
### save regression input file for reference
write.csv(regress, file.path(j.version.dir.inputs, "inputs_for_nbreg_COD.csv"), row.names=FALSE)
#***********************************************************************************************************************
#----NEG BIN MODEL------------------------------------------------------------------------------------------------------
### run negative binomial regression: death counts as a function of DTP3
### coverage and age fixed effects, with log(envelope) offset so the linear
### predictor models the death rate relative to all-cause mortality
GLM <- glm.nb(deaths ~ DTP3_coverage_prop + age_group_id + offset(log(envelope)), data=regress)
# save regression summary to the model log directory
capture.output(summary(GLM), file = file.path(j.version.dir.logs, "log_deaths_nbreg.txt"), type="output")
#***********************************************************************************************************************
#----DRAWS--------------------------------------------------------------------------------------------------------------
### set random seed for reproducible draws
set.seed(0311)
### prep prediction dataset: full demographic grid plus DTP3 covariate
pred_death <- merge(pop_env, covar, by=c("year_id", "location_id"), all.x=TRUE)
N <- nrow(pred_death)
### 1000 draws for uncertainty
# coefficient matrix
coefmat <- c(coef(GLM))
names(coefmat)[1] <- "constant"
names(coefmat) <- paste("b", names(coefmat), sep = "_")
# FIX: ncol was hard-coded to 14; derive it from the fitted model so the code
# survives a change in the number of age groups or covariates
coefmat <- matrix(unlist(coefmat), ncol=length(coefmat), byrow=TRUE, dimnames=list(c("coef"), c(as.vector(names(coefmat)))))
### covariance matrix of the fitted coefficients
vcovmat <- vcov(GLM)
# create draws of coefficients using mean (from coefficient matrix) and SD (from covariance matrix)
betas <- t(rmvnorm(n=length(draw_nums_gbd), mean=coefmat, sigma=vcovmat)) %>% data.table
colnames(betas) <- paste0("beta_draw_", draw_nums_gbd)
# rows 1-2 are the intercept and DTP3 coefficient (no age group); the
# remaining rows are the age fixed effects for age_start+1 .. age_end
# (age_start itself is the regression reference level)
betas[, age_group_id := c(NA, NA, (age_start + 1):age_end)]
# merge together predictions with age draws by age_group_id
pred_death <- merge(pred_death, betas[!is.na(age_group_id), ], by="age_group_id", all.x=TRUE)
# reference age group gets an age fixed effect of 0 for every draw
pred_death[age_group_id==age_start & is.na(beta_draw_0), paste0("beta_draw_", draw_nums_gbd) := 0]
### create draws of the dispersion parameter
# FIX: draw count was hard-coded to 1000; tie it to draw_nums_gbd
alphas <- 1 / exp(rnorm(length(draw_nums_gbd), mean=GLM$theta, sd=GLM$SE.theta))
### simulate draw-level deaths. GAMMA_EPSILON (defined by the calling
### environment) toggles whether gamma noise -- the negative-binomial
### sampling variability -- is layered on top of the mean prediction.
### lapply is used purely for its side effect: each iteration adds one
### death_draw_* column to pred_death by reference (data.table `:=`).
if (GAMMA_EPSILON == "with") {
lapply(draw_nums_gbd, function (draw) {
# set betas for this draw: intercept (row 1) and DTP3 coefficient (row 2)
b0 <- betas[1, ][[paste0("beta_draw_", draw)]]
b1 <- betas[2, ][[paste0("beta_draw_", draw)]]
# age fixed effects, already merged onto pred_death by age_group_id
age.fe <- pred_death[[paste0("beta_draw_", draw)]]
# alphas is indexed from 1, draws from 0
alpha <- alphas[draw + 1]
# gamma draw with mean = exp(linear predictor) * envelope
pred_death[, paste0("death_draw_", draw) := rgamma( N, scale=(alpha * exp( b0 + (b1 * DTP3_coverage_prop) + age.fe ) * envelope ),
shape=(1 / alpha) ) ]
})
} else if (GAMMA_EPSILON == "without") {
lapply(draw_nums_gbd, function (draw) {
# set betas for this draw
b0 <- betas[1, ][[paste0("beta_draw_", draw)]]
b1 <- betas[2, ][[paste0("beta_draw_", draw)]]
# age fixed effects
age.fe <- pred_death[[paste0("beta_draw_", draw)]]
alpha <- alphas[draw + 1]
# deterministic mean prediction (no gamma noise)
pred_death[, paste0("death_draw_", draw) := exp( b0 + (b1 * DTP3_coverage_prop) + age.fe ) * envelope ]
})
}
### save results: keep demographic ids plus the 1000 death draws
pred_death_save <- pred_death[, c("location_id", "year_id", "age_group_id", "sex_id", paste0("death_draw_", draw_nums_gbd)), with=FALSE]
if (WRITE_FILES == "yes") {
write.csv(pred_death_save, file.path(j.version.dir, paste0("01_death_predictions_from_model.csv")), row.names=FALSE)
}
#***********************************************************************************************************************
#----HYBRIDIZE----------------------------------------------------------------------------------------------------------
### read CODEm COD results for data-rich countries
# FIX: original calls had an unbalanced ")" after file.path("FILEPATH"),
# a syntax error that orphaned the key="" argument; parentheses rebalanced
cod_M <- read_hdf5_table(file.path("FILEPATH"), key="")
cod_F <- read_hdf5_table(file.path("FILEPATH"), key="")
# log feeder model versions
cat(paste0("Data-rich CODEm feeder model (males) - model version ", male_CODEm_version),
file=file.path(j.version.dir.logs, "input_model_version_ids.txt"), sep="\n", append=TRUE)
cat(paste0("Data-rich CODEm feeder model (females) - model version ", female_CODEm_version),
file=file.path(j.version.dir.logs, "input_model_version_ids.txt"), sep="\n", append=TRUE)
# combine M/F CODEm results (fill=TRUE tolerates column mismatches between sexes)
cod_DR <- rbind(cod_M, cod_F, fill=TRUE)
cod_DR <- cod_DR[, draw_cols_upload, with=FALSE]
# hybridize: CODEm results for data-rich locations, custom model everywhere else
data_rich <- unique(cod_DR$location_id)
deaths_glb <- pred_death_save[!location_id %in% data_rich, ]
colnames(deaths_glb) <- draw_cols_upload
deaths_hyb <- rbind(deaths_glb, cod_DR)
# keep only modeled age groups
deaths_hyb <- deaths_hyb[age_group_id %in% c(age_start:age_end), ]
pred_death_save <- copy(deaths_hyb)
#***********************************************************************************************************************
########################################################################################################################
##### PART TWO: FORMAT FOR CODCORRECT ##################################################################################
########################################################################################################################
#----SAVE RESULTS-------------------------------------------------------------------------------------------------------
### save to share directory for upload; measure_id 1 = deaths
colnames(pred_death_save) <- draw_cols_upload
pred_death_save[, measure_id := 1]
# side effect: one CSV per location written to cl.death.dir
lapply(unique(pred_death_save$location_id), function(x) write.csv(pred_death_save[location_id==x, ], file.path(cl.death.dir, paste0(x, ".csv")), row.names=FALSE))
print(paste0("death draws saved in ", cl.death.dir))
### save_results: submit an SGE job that uploads the draws to the CoD database
# NOTE(review): username, save_results_description and mark_model_best are
# assumed to be defined by the calling environment -- confirm upstream
job <- paste0("qsub -N s_cod_", acause, " -pe multi_slot 50 -P PROJECT -o /FILEPATH/", username, " -e /FILEPATH/", username,
" /FILEPATH/r_shell.sh /FILEPATH/save_results_wrapper.r",
" --args",
" --type cod",
" --me_id ", cause_id,
" --input_directory ", cl.death.dir,
" --descript ", "\"", gsub(" ", "_", save_results_description), "\"",
" --best ", mark_model_best)
system(job); print(job)
#***********************************************************************************************************************
}
if (CALCULATE_NONFATAL == "yes") {
########################################################################################################################
##### PART THREE: DisMod NONFATAL RESULTS ##############################################################################
########################################################################################################################
#----GET CFR------------------------------------------------------------------------------------------------------------
### read in results from the case-fatality-ratio model in DisMod (me_id 2834)
cfr_dismod <- get_draws(gbd_id_type="modelable_entity_id", gbd_id=2834, status="best", gbd_round_id=gbd_round, source="epi")
# save model version
cat(paste0("Case fatality ratio DisMod model (me_id 2834) - model run ", unique(cfr_dismod$model_version_id)),
file=file.path(j.version.dir.logs, "input_model_version_ids.txt"), sep="\n", append=TRUE)
# keep demographic ids + draws, then rename draws cfr_draw_*
cfr_dismod <- cfr_dismod[, draw_cols_upload, with=FALSE]
colnames(cfr_dismod) <- c("location_id", "year_id", "age_group_id", "sex_id", paste0("cfr_draw_", draw_nums_gbd))
#***********************************************************************************************************************
########################################################################################################################
##### PART FOUR: MORTALITY TO NONFATAL #################################################################################
########################################################################################################################
#----PREP---------------------------------------------------------------------------------------------------------------
### bring in duration draws; "A05.a" is the duration-file code for this cause
duration <- read.csv(file.path(j_root, "FILEPATH/duration_draws.csv")) %>% data.table
duration <- duration[cause=="A05.a", ]
colnames(duration) <- c("cause", paste0("dur_draw_", draw_nums_gbd))
### prep death data from CodCorrect (measure_id 1 = deaths)
deaths <- get_draws("cause_id", cause_id, "codcorrect", location_id=pop_locs, year_id=unique(cfr_dismod$year_id), gbd_round_id=gbd_round,
measure_id=1, status=compare_version)
# save model version (falls back to custom_version if CodCorrect has no version id)
if (!is.null(unique(deaths$output_version_id))) vers <- unique(deaths$output_version_id) else vers <- custom_version
cat(paste0("CodCorrect results - output version ", vers),
file=file.path(j.version.dir.logs, "input_model_version_ids.txt"), sep="\n", append=TRUE)
# deaths <- copy(pred_death_save) # run this line instead of above if don't have CodCorrect results yet
# keep demographic ids + draws, then rename draws death_draw_*
deaths <- deaths[, draw_cols_upload, with=FALSE]
colnames(deaths) <- c("location_id", "year_id", "age_group_id", "sex_id", paste0("death_draw_", draw_nums_gbd))
### bring together CFR, deaths, population/envelope, and duration for the
### nonfatal calculations (duration is joined on the constant cause code)
predict_nonfatal <- merge(cfr_dismod, deaths, by=c("location_id", "year_id", "age_group_id", "sex_id"))
predict_nonfatal <- merge(predict_nonfatal, pop_env, by=c("location_id", "year_id", "age_group_id", "sex_id"))
predict_nonfatal <- merge(predict_nonfatal[, cause := "A05.a"], duration, by="cause", all.x=TRUE)
#***********************************************************************************************************************
#----CALCULATE PREVALENCE-----------------------------------------------------------------------------------------------
### per draw: prevalence = (mortality rate / CFR) * duration, then
### incidence = prevalence / duration. lapply is used for its side effect:
### data.table `:=` adds the prev_draw_*/inc_draw_* columns by reference.
lapply(draw_nums_gbd, function (i) {
predict_nonfatal[, paste0("prev_draw_", i) := ( (get(paste0("death_draw_", i)) / population) / get(paste0("cfr_draw_", i)) ) * get(paste0("dur_draw_", i))]
predict_nonfatal[, paste0("inc_draw_", i) := get(paste0("prev_draw_", i)) / get(paste0("dur_draw_", i))]
})
### keep needed columns
predictions_prev_save <- predict_nonfatal[, c("location_id", "year_id", "age_group_id", "sex_id", paste0("prev_draw_", draw_nums_gbd)), with=FALSE]
predictions_inc_save <- predict_nonfatal[, c("location_id", "year_id", "age_group_id", "sex_id", paste0("inc_draw_", draw_nums_gbd)), with=FALSE]
#***********************************************************************************************************************
########################################################################################################################
##### PART FIVE: FORMAT FOR COMO #######################################################################################
########################################################################################################################
#----SAVE RESULTS-------------------------------------------------------------------------------------------------------
### format prevalence for como, prevalence measure_id==5
colnames(predictions_prev_save) <- draw_cols_upload
predictions_prev_save[, measure_id := 5]
### format incidence for como, incidence measure_id==6
colnames(predictions_inc_save) <- draw_cols_upload
predictions_inc_save[, measure_id := 6]
### write nonfatal outcomes to cluster: one CSV per location with both measures
save_nonfatal <- rbind(predictions_prev_save, predictions_inc_save)
lapply(unique(save_nonfatal$location_id), function(x) write.csv(save_nonfatal[location_id==x, ],
file.path(cl.version.dir, paste0(x, ".csv")), row.names=FALSE))
print(paste0("nonfatal estimates saved in ", cl.version.dir))
### upload results to db via an SGE job
job <- paste0("qsub -N s_epi_", acause, " -pe multi_slot 40 -P PROJECT -o /FILEPATH/", username, " -e /FILEPATH/", username,
" /FILEPATH/r_shell.sh /FILEPATH/save_results_wrapper.r",
" --args",
" --type epi",
" --me_id ", me_id,
" --year_ids ", paste(unique(save_nonfatal$year_id), collapse=","),
" --input_directory ", cl.version.dir,
" --descript ", "\"", gsub(" ", "_", save_results_description), "\"",
" --best ", mark_model_best)
system(job); print(job)
#***********************************************************************************************************************
} | /gbd_2017/cod_code/diptheria/01_inverse_model.R | no_license | Nermin-Ghith/ihme-modeling | R | false | false | 20,149 | r | #----HEADER-------------------------------------------------------------------------------------------------------------
# Purpose: PART ONE - Prepare negative binomial regression of diphtheria cases for input into codcorrect
# PART TWO - Format for CodCorrect and save results to database
# PART THREE - Run DisMod model for CFR
# PART FOUR - Calculate nonfatal outcomes from mortality
# Use mortality to calculate prevalence (prev = mort/cfr*duration)
# Calculate incidence from prevalence (inc = prev/duration)
# PART FIVE - Format for COMO and save results to database
#***********************************************************************************************************************
########################################################################################################################
##### START-UP #########################################################################################################
########################################################################################################################
#----CONFIG-------------------------------------------------------------------------------------------------------------
### load packages (FIX: data.table was listed twice in the original p_load call; deduplicated)
pacman::p_load(magrittr, foreign, stats, MASS, data.table, rhdf5, plyr, parallel, dplyr)
### mvtnorm lives in a custom library path on the cluster (Linux); use library()
### rather than require() so a missing package errors immediately instead of
### silently returning FALSE (lib was also a partial match for lib.loc)
if (Sys.info()["sysname"]=="Linux") library(mvtnorm, lib.loc="FILEPATH") else pacman::p_load(mvtnorm)
#***********************************************************************************************************************
#----DIRECTORIES--------------------------------------------------------------------------------------------------------
### set objects
acause <- "diptheria"
age_start <- 4
age_end <- 16
cause_id <- 338
me_id <- 1421
gbd_round <- 5
year_end <- gbd_round + 2012
### draw numbers
draw_nums_gbd <- 0:999
draw_cols_upload <- c("location_id", "year_id", "age_group_id", "sex_id", paste0("draw_", draw_nums_gbd))
### make folders on cluster
# CoD
cl.death.dir <- file.path("FILEPATH", acause, "mortality", custom_version, "draws")
if (!dir.exists(cl.death.dir)) dir.create(cl.death.dir, recursive=TRUE)
# nonfatal/epi
cl.version.dir <- file.path("FILEPATH", acause, "nonfatal", custom_version, "draws")
if (!dir.exists(cl.version.dir)) dir.create(cl.version.dir, recursive=TRUE)
### directories
home <- file.path(j_root, "FILEPATH", acause, "00_documentation")
j.version.dir <- file.path(home, "models", custom_version)
j.version.dir.inputs <- file.path(j.version.dir, "model_inputs")
j.version.dir.logs <- file.path(j.version.dir, "model_logs")
if (!dir.exists(j.version.dir.inputs)) dir.create(j.version.dir.inputs, recursive=TRUE)
if (!dir.exists(j.version.dir.logs)) dir.create(j.version.dir.logs, recursive=TRUE)
### save description of model run
# which model components are being uploaded?
if (CALCULATE_NONFATAL=="yes" & CALCULATE_FATAL=="no") add_ <- "NF"
if (CALCULATE_FATAL=="yes" & CALCULATE_NONFATAL=="no") add_ <- "CoD"
if (CALCULATE_NONFATAL=="yes" & CALCULATE_FATAL=="yes") add_ <- "NF and CoD"
# record CODEm data-rich feeder model versions used in CoD hybridization
if (CALCULATE_FATAL=="yes") description <- paste0("DR M ", male_CODEm_version, ", DR F ", female_CODEm_version, ", ", description)
# save full description to model folder
description <- paste0(add_, " - ", description)
cat(description, file=file.path(j.version.dir, "MODEL_DESCRIPTION.txt"))
#***********************************************************************************************************************
#----FUNCTIONS----------------------------------------------------------------------------------------------------------
### load custom functions
"FILEPATH/sql_query.R" %>% source
"FILEPATH/read_hdf5_table.R" %>% source
### load shared functions
file.path(j_root, "FILEPATH/get_population.R") %>% source
file.path(j_root, "FILEPATH/get_location_metadata.R") %>% source
file.path(j_root, "FILEPATH/get_covariate_estimates.R") %>% source
file.path(j_root, "FILEPATH/get_envelope.R") %>% source
file.path(j_root, "FILEPATH/get_draws.R") %>% source
file.path(j_root, "FILEPATH/get_cod_data.R") %>% source
#***********************************************************************************************************************
########################################################################################################################
##### PART ONE: COD NEG BIN REGRESSION #################################################################################
########################################################################################################################
#----PREP---------------------------------------------------------------------------------------------------------------
### get location data
locations <- get_location_metadata(gbd_round_id=gbd_round, location_set_id=22)[, ## id=22 is from covariates team, id=9 is from epi
.(location_id, ihme_loc_id, location_name, location_ascii_name, region_id, super_region_id, level, location_type, parent_id, super_region_name, sort_order)]
pop_locs <- unique(locations[level >= 3, location_id])
### pop_env
# get envelope
envelope <- get_envelope(location_id=pop_locs, sex_id=1:2, age_group_id=c(age_start:age_end), with_hiv=0, year_id=1980:year_end)[,
.(location_id, year_id, age_group_id, sex_id, mean, run_id)]
setnames(envelope, "mean", "envelope")
# get population data
population <- get_population(location_id=pop_locs, year_id=1980:year_end, age_group_id=c(age_start:age_end), sex_id=1:2)[,
.(location_id, year_id, age_group_id, sex_id, population, run_id)]
# save model version
cat(paste0("Mortality envelope - model run ", unique(envelope$run_id)),
file=file.path(j.version.dir.logs, "input_model_version_ids.txt"), sep="\n", append=TRUE)
cat(paste0("Population - model run ", unique(population$run_id)),
file=file.path(j.version.dir.logs, "input_model_version_ids.txt"), sep="\n", append=TRUE)
# bring together
pop_env <- merge(population, envelope, by=c("location_id", "year_id", "age_group_id", "sex_id"))
if (CALCULATE_FATAL == "yes") {
### covariates
# covariate: DTP3_coverage_prop, covariate_id=32
covar <- get_covariate_estimates(covariate_id=32, year_id=1980:year_end, location_id=pop_locs, gbd_round_id=gbd_round)[,
.(location_id, year_id, mean_value, model_version_id)]
setnames(covar, "mean_value", "DTP3_coverage_prop")
### save covariate versions
cat(paste0("Covariate DTP3_coverage_prop (CoD) - model version ", unique(covar$model_version_id)),
file=file.path(j.version.dir.logs, "input_model_version_ids.txt"), sep="\n", append=TRUE)
### raw
# get raw data
raw <- get_cod_data(cause_id=cause_id, gbd_round_id=gbd_round) %>% setnames(., "year", "year_id") %>% .[acause=="diptheria", .(nid, location_id, location_name, year_id, age_group_id, sex, cf_corr, sample_size, description)] %>% setnames(., "sex", "sex_id")
raw <- raw[!is.na(cf_corr) & year_id >= 1980 & sample_size != 0 & age_group_id %in% c(age_start:age_end), ]
# save model version
cat(paste0("CoD data - version ", unique(raw$description)),
file=file.path(j.version.dir.logs, "input_model_version_ids.txt"), sep="\n", append=TRUE)
# add on mortality envelope
raw <- merge(raw, pop_env, by=c("location_id", "year_id", "age_group_id", "sex_id"), all.x=TRUE)
# calculate death counts
raw[, deaths := cf_corr * envelope]
### round to whole number (counts!)
raw[deaths < 0.5, deaths := 0]
### inform model with national data only
raw <- raw[location_id %in% locations[level==3, location_id], ]
### drop outliers (cf greather than 99th percentile)
cf_999 <- quantile(raw$cf_corr, 0.999)
raw <- raw[cf_corr <= cf_999, ]
### make ages into levels
raw[, age_group_id := as.factor(age_group_id)]
### bring together variables for regression
regress <- merge(raw, covar, by=c("location_id", "year_id"))
### save file for reference
write.csv(regress, file.path(j.version.dir.inputs, "inputs_for_nbreg_COD.csv"), row.names=FALSE)
#***********************************************************************************************************************
#----NEG BIN MODEL------------------------------------------------------------------------------------------------------
### run negative binomial regression
GLM <- glm.nb(deaths ~ DTP3_coverage_prop + age_group_id + offset(log(envelope)), data=regress)
# save log
capture.output(summary(GLM), file = file.path(j.version.dir.logs, "log_deaths_nbreg.txt"), type="output")
#***********************************************************************************************************************
#----DRAWS--------------------------------------------------------------------------------------------------------------
### set random seed for reproducible draws
set.seed(0311)
### prep prediction dataset: full demographic grid plus DTP3 covariate
pred_death <- merge(pop_env, covar, by=c("year_id", "location_id"), all.x=TRUE)
N <- nrow(pred_death)
### 1000 draws for uncertainty
# coefficient matrix
coefmat <- c(coef(GLM))
names(coefmat)[1] <- "constant"
names(coefmat) <- paste("b", names(coefmat), sep = "_")
# FIX: ncol was hard-coded to 14; derive it from the fitted model so the code
# survives a change in the number of age groups or covariates
coefmat <- matrix(unlist(coefmat), ncol=length(coefmat), byrow=TRUE, dimnames=list(c("coef"), c(as.vector(names(coefmat)))))
### covariance matrix of the fitted coefficients
vcovmat <- vcov(GLM)
# create draws of coefficients using mean (from coefficient matrix) and SD (from covariance matrix)
betas <- t(rmvnorm(n=length(draw_nums_gbd), mean=coefmat, sigma=vcovmat)) %>% data.table
colnames(betas) <- paste0("beta_draw_", draw_nums_gbd)
# rows 1-2 are the intercept and DTP3 coefficient (no age group); the
# remaining rows are the age fixed effects for age_start+1 .. age_end
# (age_start itself is the regression reference level)
betas[, age_group_id := c(NA, NA, (age_start + 1):age_end)]
# merge together predictions with age draws by age_group_id
pred_death <- merge(pred_death, betas[!is.na(age_group_id), ], by="age_group_id", all.x=TRUE)
# reference age group gets an age fixed effect of 0 for every draw
pred_death[age_group_id==age_start & is.na(beta_draw_0), paste0("beta_draw_", draw_nums_gbd) := 0]
### create draws of the dispersion parameter
# FIX: draw count was hard-coded to 1000; tie it to draw_nums_gbd
alphas <- 1 / exp(rnorm(length(draw_nums_gbd), mean=GLM$theta, sd=GLM$SE.theta))
if (GAMMA_EPSILON == "with") {
lapply(draw_nums_gbd, function (draw) {
# set betas
b0 <- betas[1, ][[paste0("beta_draw_", draw)]]
b1 <- betas[2, ][[paste0("beta_draw_", draw)]]
# age fixed effects
age.fe <- pred_death[[paste0("beta_draw_", draw)]]
alpha <- alphas[draw + 1]
# calculate 1000 draws
pred_death[, paste0("death_draw_", draw) := rgamma( N, scale=(alpha * exp( b0 + (b1 * DTP3_coverage_prop) + age.fe ) * envelope ),
shape=(1 / alpha) ) ]
})
} else if (GAMMA_EPSILON == "without") {
lapply(draw_nums_gbd, function (draw) {
# set betas
b0 <- betas[1, ][[paste0("beta_draw_", draw)]]
b1 <- betas[2, ][[paste0("beta_draw_", draw)]]
# age fixed effects
age.fe <- pred_death[[paste0("beta_draw_", draw)]]
alpha <- alphas[draw + 1]
# calculate 1000 draws
pred_death[, paste0("death_draw_", draw) := exp( b0 + (b1 * DTP3_coverage_prop) + age.fe ) * envelope ]
})
}
### save results
pred_death_save <- pred_death[, c("location_id", "year_id", "age_group_id", "sex_id", paste0("death_draw_", draw_nums_gbd)), with=FALSE]
if (WRITE_FILES == "yes") {
write.csv(pred_death_save, file.path(j.version.dir, paste0("01_death_predictions_from_model.csv")), row.names=FALSE)
}
#***********************************************************************************************************************
#----HYBRIDIZE----------------------------------------------------------------------------------------------------------
### read CODEm COD results for data-rich countries
# FIX: original calls had an unbalanced ")" after file.path("FILEPATH"),
# a syntax error that orphaned the key="" argument; parentheses rebalanced
cod_M <- read_hdf5_table(file.path("FILEPATH"), key="")
cod_F <- read_hdf5_table(file.path("FILEPATH"), key="")
# log feeder model versions
cat(paste0("Data-rich CODEm feeder model (males) - model version ", male_CODEm_version),
file=file.path(j.version.dir.logs, "input_model_version_ids.txt"), sep="\n", append=TRUE)
cat(paste0("Data-rich CODEm feeder model (females) - model version ", female_CODEm_version),
file=file.path(j.version.dir.logs, "input_model_version_ids.txt"), sep="\n", append=TRUE)
# combine M/F CODEm results (fill=TRUE tolerates column mismatches between sexes)
cod_DR <- rbind(cod_M, cod_F, fill=TRUE)
cod_DR <- cod_DR[, draw_cols_upload, with=FALSE]
# hybridize: CODEm results for data-rich locations, custom model everywhere else
data_rich <- unique(cod_DR$location_id)
deaths_glb <- pred_death_save[!location_id %in% data_rich, ]
colnames(deaths_glb) <- draw_cols_upload
deaths_hyb <- rbind(deaths_glb, cod_DR)
# keep only modeled age groups
deaths_hyb <- deaths_hyb[age_group_id %in% c(age_start:age_end), ]
pred_death_save <- copy(deaths_hyb)
#***********************************************************************************************************************
########################################################################################################################
##### PART TWO: FORMAT FOR CODCORRECT ##################################################################################
########################################################################################################################
#----SAVE RESULTS-------------------------------------------------------------------------------------------------------
### save to share directory for upload
colnames(pred_death_save) <- draw_cols_upload
# measure_id 1 = deaths (CoD convention)
pred_death_save[, measure_id := 1]
# One CSV per location, as required by the save_results uploader.
# lapply is used purely for its write.csv side effect; the return is discarded.
lapply(unique(pred_death_save$location_id), function(x) write.csv(pred_death_save[location_id==x, ], file.path(cl.death.dir, paste0(x, ".csv")), row.names=FALSE))
print(paste0("death draws saved in ", cl.death.dir))
### save_results
# Build and submit the cluster job that uploads the draws.
# NOTE(review): the command is assembled by string concatenation; values such
# as save_results_description are only partially sanitized (spaces -> "_") --
# confirm no other shell-special characters can reach these variables.
job <- paste0("qsub -N s_cod_", acause, " -pe multi_slot 50 -P PROJECT -o /FILEPATH/", username, " -e /FILEPATH/", username,
" /FILEPATH/r_shell.sh /FILEPATH/save_results_wrapper.r",
" --args",
" --type cod",
" --me_id ", cause_id,
" --input_directory ", cl.death.dir,
" --descript ", "\"", gsub(" ", "_", save_results_description), "\"",
" --best ", mark_model_best)
system(job); print(job)
#***********************************************************************************************************************
}
if (CALCULATE_NONFATAL == "yes") {
########################################################################################################################
##### PART THREE: DisMod NONFATAL RESULTS ##############################################################################
########################################################################################################################
#----GET CFR------------------------------------------------------------------------------------------------------------
### read in results from CFR model in DisMod
cfr_dismod <- get_draws(gbd_id_type="modelable_entity_id", gbd_id=2834, status="best", gbd_round_id=gbd_round, source="epi")
# save model version
cat(paste0("Case fatality ratio DisMod model (me_id 2834) - model run ", unique(cfr_dismod$model_version_id)),
file=file.path(j.version.dir.logs, "input_model_version_ids.txt"), sep="\n", append=TRUE)
# remove excess columns
cfr_dismod <- cfr_dismod[, draw_cols_upload, with=FALSE]
colnames(cfr_dismod) <- c("location_id", "year_id", "age_group_id", "sex_id", paste0("cfr_draw_", draw_nums_gbd))
#***********************************************************************************************************************
########################################################################################################################
##### PART FOUR: MORTALITY TO NONFATAL #################################################################################
########################################################################################################################
#----PREP---------------------------------------------------------------------------------------------------------------
### bring in duration data
duration <- read.csv(file.path(j_root, "FILEPATH/duration_draws.csv")) %>% data.table
duration <- duration[cause=="A05.a", ]
colnames(duration) <- c("cause", paste0("dur_draw_", draw_nums_gbd))
### prep death data
deaths <- get_draws("cause_id", cause_id, "codcorrect", location_id=pop_locs, year_id=unique(cfr_dismod$year_id), gbd_round_id=gbd_round,
measure_id=1, status=compare_version)
# save model version
if (!is.null(unique(deaths$output_version_id))) vers <- unique(deaths$output_version_id) else vers <- custom_version
cat(paste0("CodCorrect results - output version ", vers),
file=file.path(j.version.dir.logs, "input_model_version_ids.txt"), sep="\n", append=TRUE)
# deaths <- copy(pred_death_save) # run this line instead of above if don't have CodCorrect results yet
# remove excess columns
deaths <- deaths[, draw_cols_upload, with=FALSE]
colnames(deaths) <- c("location_id", "year_id", "age_group_id", "sex_id", paste0("death_draw_", draw_nums_gbd))
### bring together variables for nonfatal calculations
predict_nonfatal <- merge(cfr_dismod, deaths, by=c("location_id", "year_id", "age_group_id", "sex_id"))
predict_nonfatal <- merge(predict_nonfatal, pop_env, by=c("location_id", "year_id", "age_group_id", "sex_id"))
predict_nonfatal <- merge(predict_nonfatal[, cause := "A05.a"], duration, by="cause", all.x=TRUE)
#***********************************************************************************************************************
#----CALCULATE PREVALENCE-----------------------------------------------------------------------------------------------
### calculate prevalence (mort/cfr*duration)
# Per draw: prevalence = (deaths / population) / CFR * duration, i.e. the
# mortality rate scaled up to cases via the case-fatality ratio, spread over
# the episode duration. Incidence is then prevalence / duration.
# lapply is used for its side effects only: `:=` modifies predict_nonfatal
# by reference, so the lapply return value is intentionally discarded.
lapply(draw_nums_gbd, function (i) {
predict_nonfatal[, paste0("prev_draw_", i) := ( (get(paste0("death_draw_", i)) / population) / get(paste0("cfr_draw_", i)) ) * get(paste0("dur_draw_", i))]
predict_nonfatal[, paste0("inc_draw_", i) := get(paste0("prev_draw_", i)) / get(paste0("dur_draw_", i))]
})
### keep needed columns
predictions_prev_save <- predict_nonfatal[, c("location_id", "year_id", "age_group_id", "sex_id", paste0("prev_draw_", draw_nums_gbd)), with=FALSE]
predictions_inc_save <- predict_nonfatal[, c("location_id", "year_id", "age_group_id", "sex_id", paste0("inc_draw_", draw_nums_gbd)), with=FALSE]
#***********************************************************************************************************************
########################################################################################################################
##### PART FIVE: FORMAT FOR COMO #######################################################################################
########################################################################################################################
#----SAVE RESULTS-------------------------------------------------------------------------------------------------------
### format prevalence for como, prevalence measure_id==5
colnames(predictions_prev_save) <- draw_cols_upload
predictions_prev_save[, measure_id := 5]
### format incidence for como, incidence measure_id==6
colnames(predictions_inc_save) <- draw_cols_upload
predictions_inc_save[, measure_id := 6]
### write nonfatal outcomes to cluster
save_nonfatal <- rbind(predictions_prev_save, predictions_inc_save)
lapply(unique(save_nonfatal$location_id), function(x) write.csv(save_nonfatal[location_id==x, ],
file.path(cl.version.dir, paste0(x, ".csv")), row.names=FALSE))
print(paste0("nonfatal estimates saved in ", cl.version.dir))
### upload results to db
job <- paste0("qsub -N s_epi_", acause, " -pe multi_slot 40 -P PROJECT -o /FILEPATH/", username, " -e /FILEPATH/", username,
" /FILEPATH/r_shell.sh /FILEPATH/save_results_wrapper.r",
" --args",
" --type epi",
" --me_id ", me_id,
" --year_ids ", paste(unique(save_nonfatal$year_id), collapse=","),
" --input_directory ", cl.version.dir,
" --descript ", "\"", gsub(" ", "_", save_results_description), "\"",
" --best ", mark_model_best)
system(job); print(job)
#***********************************************************************************************************************
} |
# Attaches packages the code needs to run
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() would fail fast here -- confirm before changing.
suppressMessages(require(reshape))
suppressMessages(require(zipcode))
suppressMessages(require(gtools))
# Sets working directory, reads file and creates a nickname
# NOTE(review): the month is hard-coded (201705) while `date` below is derived
# from Sys.Date() -- these must agree for the file paths to resolve.
setwd("Y:/monthly import/201705/raw")
wd <- getwd()
date <- format(Sys.Date(), "%B%Y")
# Reads in files in format March2015Horizon.csv
# United arrives as CSV; Horizon as a pipe-delimited text file.
united <- read.csv(paste(wd,"/",date,"United", ".csv", sep=""), header=TRUE, stringsAsFactors = FALSE)
horizon <- read.delim(paste(wd,"/",date,"Horizon",".txt",sep = ""), sep="|", quote = "", stringsAsFactors=FALSE)
# De-duplicate horizon file (drops fully identical rows only)
horizon <- unique(horizon)
# Concatenates Provider information
united$CURR_PCP_FULL_NAME <- paste(united$PROV_LNAME, united$PROV_FNAME, sep=", ")
# Removes unused fields
# One name-vector drop per data frame replaces 23 individual `$col <- NULL`
# statements. setdiff() preserves the original column order, and a listed
# column that is absent is simply ignored -- matching the no-op behavior of
# assigning NULL to a missing column in the original.
united_drop <- c(
  "AGE", "PANEL_ID", "PLAN_DESC", "TERM_DATE", "PLAN_CODE", "PROV_FNAME",
  "PROV_LNAME", "PROV_LANG_1", "PROV_LANG_2", "PROV_LANG_3", "PROV_EFF_DATE",
  "EFFECTIVE_DATE", "PROV_TERM_DATE", "COSMOS_CUST_SEG", "LINE_OF_BUSINESS",
  "COSMOS_CUST_SEG_DESC"
)
horizon_drop <- c(
  "Member_Months", "Future_Rx_Costs", "Total_Mem_Months", "Future_Risk_Costs",
  "Primary_Risk_Factor", "Prior_Rx_Costs_Annualized", "Prior_Total_Costs_Annualized"
)
united <- united[, setdiff(names(united), united_drop), drop = FALSE]
horizon <- horizon[, setdiff(names(horizon), horizon_drop), drop = FALSE]
# Renames fields in the United and Horizon cap lists
# NOTE(review): rename(df, NEW = OLD) is the dplyr signature, but dplyr is not
# attached until the end of this script; only reshape (whose rename() takes a
# named replace vector of old = new) is loaded here. Confirm which rename()
# these calls actually dispatch to -- as written they look dplyr-style.
united <- rename(united, DOB = DATE_OF_BIRTH)
united <- rename(united, GENDER = MEMB_GENDER)
united <- rename(united, CURR_PCP_ID = PROVIDER_ID)
united <- rename(united, PHONE_NUMBER = PROV_PHONE)
united <- rename(united, CURR_PCP_ADDRESS_LINE_1 = PROV_ADDRESS_LINE_1)
united <- rename(united, CURR_PCP_ADDRESS_LINE_2 = PROV_ADDRESS_LINE_2)
united <- rename(united, CURR_PCP_CITY = PROV_CITY)
united <- rename(united, CURR_PCP_STATE = PROV_STATE)
united <- rename(united, CURR_PCP_ZIP = PROV_ZIP)
united <- rename(united, VEND_FULL_NAME = PAYEE_NAME)
horizon <- rename(horizon, SUBSCRIBER_ID = Subscriber_ID)
horizon <- rename(horizon, GENDER = Gender)
horizon <- rename(horizon, SOCIAL_SEC_NO = SSN)
# Adds necessary fields to the Horizon file for merging
horizon$MEDICARE_NO <- ""
horizon$MEMB_ETHNICITY <- ""
horizon$MEMB_LANGUAGE <- ""
# Maps languages in the united file to the full name of the language
# A named lookup vector replaces 14 repetitive conditional assignments.
# Codes absent from the map are left untouched, exactly as the original
# if-chain behaved; unlike `== "ARA"` indexing, %in% is also NA-safe
# (an NA comparison index makes subscripted assignment error in base R).
# NOTE(review): "Portuegese" is the original (misspelled) value, preserved
# verbatim because downstream consumers may match on it -- confirm before fixing.
language_map <- c(
  ARA = "Arabic", CHI = "Chinese", ENG = "English", FRE = "French",
  HEB = "Hebrew", ITA = "Italian", KOR = "Korean", "N/A" = "",
  PER = "Persian", POR = "Portuegese", SPA = "Spanish", TUR = "Turkish",
  UNK = "Unknown", VIE = "Vietnamese"
)
united$MEMB_LANGUAGE <- as.character(united$MEMB_LANGUAGE)
known <- united$MEMB_LANGUAGE %in% names(language_map)
united$MEMB_LANGUAGE[known] <- unname(language_map[united$MEMB_LANGUAGE[known]])
# Adds text identifiers to Subscriber IDs
united$SUBSCRIBER_ID <- paste("U", united$SUBSCRIBER_ID, sep="")
horizon$SUBSCRIBER_ID <- paste("H", horizon$SUBSCRIBER_ID, sep="")
# Sets the MEDICAID_NO field as numeric to get rid of scientific notation
options(scipen=999)
united$MEDICAID_NO <- as.numeric(as.character(united$MEDICAID_NO))
# Cleans the home phone number field of parentheses, spaces and dashes
united$HOME_PHONE_NUMBER <- gsub("\\(|\\)|\\-|\\ ", "", united$HOME_PHONE_NUMBER)
# Cleans zip codes (normalizes to canonical 5-digit form via the zipcode pkg)
united$MEMB_ZIP <- clean.zipcodes(united$MEMB_ZIP)
# BUG FIX: this line previously read clean.zipcodes(united$MEMB_ZIP), silently
# overwriting every United provider zip with the member's zip. The horizon
# lines below show the intended per-column pattern.
united$CURR_PCP_ZIP <- clean.zipcodes(united$CURR_PCP_ZIP)
horizon$MEMB_ZIP <- clean.zipcodes(horizon$MEMB_ZIP)
horizon$CURR_PCP_ZIP <- clean.zipcodes(horizon$CURR_PCP_ZIP)
# Cleans birth dates
united$DOB <- as.Date(united$DOB, "%m/%d/%Y")
#Deletes entries with the wrong vendor names#
united <- subset(united, !(VEND_FULL_NAME=="CHILD REGIONAL/CAMDEN"))
# Keeps only where PCP City is Camden or Pennsauken, and keeps all of CamCare
united <- subset(united, CURR_PCP_CITY=="CAMDEN" | CURR_PCP_CITY=="CADMEN" |CURR_PCP_CITY=="CANDEM" |CURR_PCP_CITY=="PENNSAUKEN" | VEND_FULL_NAME=="CAMCARE HEALTH CORPORATION")
# If the code to rename vendors gives you trouble, modify the below code to fix the errors#
united <- data.frame(lapply(united, as.character), stringsAsFactors=FALSE)
# Adds Identification fields
united$PAYER <- "UNITED"
united$Source <- "United"
horizon$PAYER <- "HORIZON"
horizon$Source <- "Horizon"
# Sorts columns in united and horizon A-Z
united <- united[,order(names(united))]
horizon <- horizon[,order(names(horizon))]
# Merges united and horizon data
AllPayers <- rbind(united,horizon)
# Converts Current PCP City to all capital letters
AllPayers$CURR_PCP_CITY <- toupper(AllPayers$CURR_PCP_CITY)
#Renames vendors to match Current PCP City#
AllPayers$VEND_FULL_NAME[AllPayers$VEND_FULL_NAME == "LOURDES MEDICAL ASSOCIATES" & AllPayers$CURR_PCP_CITY == "CAMDEN"] <- "LOURDES MEDICAL ASSOCIATES_CAMDEN"
AllPayers$VEND_FULL_NAME[AllPayers$VEND_FULL_NAME == "LOURDES MEDICAL ASSOCIATES PA" & AllPayers$CURR_PCP_CITY == "CAMDEN"] <- "LOURDES MEDICAL ASSOCIATES_CAMDEN"
AllPayers$VEND_FULL_NAME[AllPayers$VEND_FULL_NAME == "LOURDES MEDICAL ASSOCIATES" & AllPayers$CURR_PCP_CITY == "PENNSAUKEN"] <- "LOURDES MEDICAL ASSOCIATES_PENNSAUKEN"
AllPayers$VEND_FULL_NAME[AllPayers$VEND_FULL_NAME == "LOURDES MEDICAL ASSOCIATES PA" & AllPayers$CURR_PCP_CITY == "PENNSAUKEN"] <- "LOURDES MEDICAL ASSOCIATES_PENNSAUKEN"
AllPayers$VEND_FULL_NAME[AllPayers$VEND_FULL_NAME == "OSBORN FAMILY PRACTICE" & AllPayers$CURR_PCP_CITY == "CAMDEN"] <- "OSBORN FAMILY PRACTICE_CAMDEN"
AllPayers$VEND_FULL_NAME[AllPayers$VEND_FULL_NAME == "THE OSBORN FAMILY HEALTH CENTER" & AllPayers$CURR_PCP_CITY == "CAMDEN"] <- "OSBORN FAMILY PRACTICE_CAMDEN"
AllPayers$VEND_FULL_NAME[AllPayers$VEND_FULL_NAME == "THE OSBORN FAMILY HEALTH CENTER INC" & AllPayers$CURR_PCP_CITY == "CAMDEN"] <- "OSBORN FAMILY PRACTICE_CAMDEN"
AllPayers$VEND_FULL_NAME[AllPayers$VEND_FULL_NAME == "BROADWAY FAMILY PRACTICE" & AllPayers$CURR_PCP_CITY == "CAMDEN"] <- "RELIANCE BROADWAY_CAMDEN"
AllPayers$VEND_FULL_NAME[AllPayers$VEND_FULL_NAME == "BROADWAY FAMILY PRACTICE" & AllPayers$CURR_PCP_CITY == "PENNSAUKEN"] <- "RELIANCE BROADWAY_PENNSAUKEN"
#Maps to practices#
# A single vendor -> practice lookup table replaces 30+ repetitive conditional
# assignments. Exact-duplicate lines from the original (e.g. two identical
# "ST LUKE'S CATHOLIC MED SVCS" entries) are collapsed to one each.
# Vendors absent from the table get NA in PRACTICE, matching the original
# behavior of leaving unmatched rows unassigned; unlike `==`-based indexing,
# named-vector lookup is also safe when VEND_FULL_NAME contains NA.
practice_map <- c(
  "ACOSTA RAMON" = "Acosta",
  "RELIANCE BROADWAY_CAMDEN" = "Reliance Broadway",
  "NEW JERSEY MEDICAL AND HEALTH ASSOCIATES LLC" = "Reliance Broadway",
  "RELIANCE MEDICAL GROUP" = "Reliance Broadway",
  "RELIANCE MEDICAL GROUP LLC" = "Reliance Broadway",
  "RELIANCE BROADWAY_PENNSAUKEN" = "Reliance Pennsauken",
  "CAMCARE HEALTH CORPORATION" = "CAMcare",
  "COOPER AMBULATORY PEDIATRICS" = "Cooper Pediatrics",
  "COOPER FAMILY MEDICINE" = "Cooper Family",
  "COOPER FAMILY MEDICINE PC" = "Cooper Family",
  "COOPER PEDIATRICS" = "Cooper Pediatrics",
  "COOPER HEALTH SYSTEM PEDIATRICS DEPARTMENT" = "Cooper Pediatrics",
  "COOPER HEALTH SYSTEM - PEDIATRICS DEPARTMENT " = "Cooper Pediatrics",  # trailing space intentional: appears this way in source data
  "COOPER HEALTH SYSTEM - PEDIATRICS DEPARTMENT" = "Cooper Pediatrics",
  "COOPER PHYSICIANS OFFICES" = "Cooper IM",
  "COOPER PHYSICIAN OFFICES PA" = "Cooper IM",
  "Cooper_UHI_Nic" = "Cooper IM",
  "JEFFREY A KLEEMAN DO" = "Fairview",
  "LOURDES MEDICAL ASSOCIATES_CAMDEN" = "Osborn",
  "LOURDES MEDICAL ASSOCIATES_PENNSAUKEN" = "Lourdes Pediatrics",
  "OSBORN FAMILY PRACTICE_CAMDEN" = "Osborn",
  "PROJECT HOPE" = "Project Hope",
  "PROJECT HOPE HOMELESS PROGRAM" = "Project Hope",
  "RIVER PRIMARY CARE CENTER" = "Reliance River",
  "ST LUKE'S CATHOLIC MED SVCS" = "St. Lukes",
  "ST LUKES CATHOLIC MEDICAL SERVICES INC" = "St. Lukes",
  "ST LUKE’S CATHOLIC MEDICAL SERVICES INC" = "St. Lukes",  # curly-apostrophe variant kept verbatim
  "ST LUKE'S CATHOLIC MEDICAL SERVICES INC" = "St. Lukes",
  "VIRTUA FAMILY MEDICINE-COOPER RIVER" = "Virtua",
  "VIRTUA MEDICAL GROUP" = "Virtua",
  "VIRTUA MEDICAL GROUP PA" = "Virtua",
  "NELSON HOMER L" = "Broadway Community",
  "ERVILUS PATRICK" = "Broadway Community"
)
AllPayers$PRACTICE <- unname(practice_map[AllPayers$VEND_FULL_NAME])
# Sets as dataframe
AllPayers <- as.data.frame(AllPayers)
# Removes fields that don't need to go to CareEvolution from the CareEvolution version of the file
AllPayers$MEMB_LANGUAGE <- NULL
AllPayers$MEMB_ETHNICITY <- NULL
# Adds last capitation
AllPayers$LastCapitationDate <- format(Sys.time(), "%m/01/%Y")
# Remove the leading "U" payer prefix (added earlier to United IDs) so the
# values match TrackVia Subscriber IDs.
# BUG FIX: the original gsub("U", "", ...) stripped EVERY "U" anywhere in the
# string -- including inside Horizon "H"-prefixed IDs -- not just the prefix.
AllPayers$SUBSCRIBER_ID <- sub("^U", "", AllPayers$SUBSCRIBER_ID)
# Function to check for missing data in a vector.
# Stops with an informative error when ANY element is NA; returns invisibly
# (NULL) when the vector is complete.
# BUG FIX: the original `if (is.na(x) == TRUE)` inspected only the first
# element of a vector (and a length > 1 condition is an error in R >= 4.2),
# so missing practices beyond the first row were never detected.
missing_practice <- function(x){
  if (any(is.na(x))) stop ('Dataframe includes missing practices. Fix these before sending to CareEvolution')
}
# Prints warning message if dataframe is missing practices
missing_practice(AllPayers$PRACTICE)
# Creates data frame with records a practice
# These need to be fixed before sending to Nick (!!!)
NO_PRACTICE <- subset(AllPayers, is.na(AllPayers$PRACTICE))
# Exports file for CareEvolution
# Includes those with no practice
write.csv(AllPayers, (file=paste(format(Sys.Date(), "%Y-%m-%d-"),"AllPayers", ".csv", sep="")), row.names=FALSE)
# Breakdown by practice: CSV file
library(dplyr)
AllPayers %>%
group_by(PRACTICE, Source) %>%
count() %>%
write.csv("practice_count.csv", row.names = F)
# PCP of NO_PRACTICE
NO_PRACTICE %>%
group_by(VEND_FULL_NAME, Source, CURR_PCP_CITY) %>%
count() %>%
write.csv("no_practice_count.csv", row.names = F)
| /AllPayers.R | no_license | itirsnpk/monthly-data-import | R | false | false | 11,685 | r | # Attaches packages the code needs to run
suppressMessages(require(reshape))
suppressMessages(require(zipcode))
suppressMessages(require(gtools))
# Sets working directory, reads file and creates a nickname
setwd("Y:/monthly import/201705/raw")
wd <- getwd()
date <- format(Sys.Date(), "%B%Y")
# Reads in files in format March2015Horizon.csv
united <- read.csv(paste(wd,"/",date,"United", ".csv", sep=""), header=TRUE, stringsAsFactors = FALSE)
horizon <- read.delim(paste(wd,"/",date,"Horizon",".txt",sep = ""), sep="|", quote = "", stringsAsFactors=FALSE)
# De-duplicate horizon file
horizon <- unique(horizon)
# Concatenates Provider information
united$CURR_PCP_FULL_NAME <- paste(united$PROV_LNAME, united$PROV_FNAME, sep=", ")
# Removes unused fields
united$AGE <- NULL
united$PANEL_ID <- NULL
united$PLAN_DESC <- NULL
united$TERM_DATE <- NULL
united$PLAN_CODE <- NULL
united$PROV_FNAME <- NULL
united$PROV_LNAME <- NULL
united$PROV_LANG_1 <- NULL
united$PROV_LANG_2 <- NULL
united$PROV_LANG_3 <- NULL
united$PROV_EFF_DATE <- NULL
united$EFFECTIVE_DATE <- NULL
united$PROV_TERM_DATE <- NULL
united$COSMOS_CUST_SEG <- NULL
united$LINE_OF_BUSINESS <- NULL
united$COSMOS_CUST_SEG_DESC <- NULL
horizon$Member_Months <- NULL
horizon$Future_Rx_Costs <- NULL
horizon$Total_Mem_Months <- NULL
horizon$Future_Risk_Costs <- NULL
horizon$Primary_Risk_Factor <- NULL
horizon$Prior_Rx_Costs_Annualized <- NULL
horizon$Prior_Total_Costs_Annualized <- NULL
# Renames fields in the United and Horizon cap lists
united <- rename(united, DOB = DATE_OF_BIRTH)
united <- rename(united, GENDER = MEMB_GENDER)
united <- rename(united, CURR_PCP_ID = PROVIDER_ID)
united <- rename(united, PHONE_NUMBER = PROV_PHONE)
united <- rename(united, CURR_PCP_ADDRESS_LINE_1 = PROV_ADDRESS_LINE_1)
united <- rename(united, CURR_PCP_ADDRESS_LINE_2 = PROV_ADDRESS_LINE_2)
united <- rename(united, CURR_PCP_CITY = PROV_CITY)
united <- rename(united, CURR_PCP_STATE = PROV_STATE)
united <- rename(united, CURR_PCP_ZIP = PROV_ZIP)
united <- rename(united, VEND_FULL_NAME = PAYEE_NAME)
horizon <- rename(horizon, SUBSCRIBER_ID = Subscriber_ID)
horizon <- rename(horizon, GENDER = Gender)
horizon <- rename(horizon, SOCIAL_SEC_NO = SSN)
# Adds necessary fields to the Horizon file for merging
horizon$MEDICARE_NO <- ""
horizon$MEMB_ETHNICITY <- ""
horizon$MEMB_LANGUAGE <- ""
# Maps languages in the united file to the full name of the language
united$MEMB_LANGUAGE <- as.character(united$MEMB_LANGUAGE)
united$MEMB_LANGUAGE[united$MEMB_LANGUAGE=="ARA"] <- "Arabic"
united$MEMB_LANGUAGE[united$MEMB_LANGUAGE=="CHI"] <- "Chinese"
united$MEMB_LANGUAGE[united$MEMB_LANGUAGE=="ENG"] <- "English"
united$MEMB_LANGUAGE[united$MEMB_LANGUAGE=="FRE"] <- "French"
united$MEMB_LANGUAGE[united$MEMB_LANGUAGE=="HEB"] <- "Hebrew"
united$MEMB_LANGUAGE[united$MEMB_LANGUAGE=="ITA"] <- "Italian"
united$MEMB_LANGUAGE[united$MEMB_LANGUAGE=="KOR"] <- "Korean"
united$MEMB_LANGUAGE[united$MEMB_LANGUAGE=="N/A"] <- ""
united$MEMB_LANGUAGE[united$MEMB_LANGUAGE=="PER"] <- "Persian"
united$MEMB_LANGUAGE[united$MEMB_LANGUAGE=="POR"] <- "Portuegese"
united$MEMB_LANGUAGE[united$MEMB_LANGUAGE=="SPA"] <- "Spanish"
united$MEMB_LANGUAGE[united$MEMB_LANGUAGE=="TUR"] <- "Turkish"
united$MEMB_LANGUAGE[united$MEMB_LANGUAGE=="UNK"] <- "Unknown"
united$MEMB_LANGUAGE[united$MEMB_LANGUAGE=="VIE"] <- "Vietnamese"
# Adds text identifiers to Subscriber IDs
united$SUBSCRIBER_ID <- paste("U", united$SUBSCRIBER_ID, sep="")
horizon$SUBSCRIBER_ID <- paste("H", horizon$SUBSCRIBER_ID, sep="")
# Sets the MEDICAID_NO field as numeric to get rid of scientific notation
options(scipen=999)
united$MEDICAID_NO <- as.numeric(as.character(united$MEDICAID_NO))
# Cleans the home phone number field of parentheses, spaces and dashes
united$HOME_PHONE_NUMBER <- gsub("\\(|\\)|\\-|\\ ", "", united$HOME_PHONE_NUMBER)
# Cleans zip codes (normalizes to canonical 5-digit form via the zipcode pkg)
united$MEMB_ZIP <- clean.zipcodes(united$MEMB_ZIP)
# BUG FIX: this line previously read clean.zipcodes(united$MEMB_ZIP), silently
# overwriting every United provider zip with the member's zip. The horizon
# lines below show the intended per-column pattern.
united$CURR_PCP_ZIP <- clean.zipcodes(united$CURR_PCP_ZIP)
horizon$MEMB_ZIP <- clean.zipcodes(horizon$MEMB_ZIP)
horizon$CURR_PCP_ZIP <- clean.zipcodes(horizon$CURR_PCP_ZIP)
# Cleans birth dates
united$DOB <- as.Date(united$DOB, "%m/%d/%Y")
#Deletes entries with the wrong vendor names#
united <- subset(united, !(VEND_FULL_NAME=="CHILD REGIONAL/CAMDEN"))
# Keeps only where PCP City is Camden or Pennsauken, and keeps all of CamCare
united <- subset(united, CURR_PCP_CITY=="CAMDEN" | CURR_PCP_CITY=="CADMEN" |CURR_PCP_CITY=="CANDEM" |CURR_PCP_CITY=="PENNSAUKEN" | VEND_FULL_NAME=="CAMCARE HEALTH CORPORATION")
# If the code to rename vendors gives you trouble, modify the below code to fix the errors#
united <- data.frame(lapply(united, as.character), stringsAsFactors=FALSE)
# Adds Identification fields
united$PAYER <- "UNITED"
united$Source <- "United"
horizon$PAYER <- "HORIZON"
horizon$Source <- "Horizon"
# Sorts columns in united and horizon A-Z
united <- united[,order(names(united))]
horizon <- horizon[,order(names(horizon))]
# Merges united and horizon data
AllPayers <- rbind(united,horizon)
# Converts Current PCP City to all capital letters
AllPayers$CURR_PCP_CITY <- toupper(AllPayers$CURR_PCP_CITY)
#Renames vendors to match Current PCP City#
AllPayers$VEND_FULL_NAME[AllPayers$VEND_FULL_NAME == "LOURDES MEDICAL ASSOCIATES" & AllPayers$CURR_PCP_CITY == "CAMDEN"] <- "LOURDES MEDICAL ASSOCIATES_CAMDEN"
AllPayers$VEND_FULL_NAME[AllPayers$VEND_FULL_NAME == "LOURDES MEDICAL ASSOCIATES PA" & AllPayers$CURR_PCP_CITY == "CAMDEN"] <- "LOURDES MEDICAL ASSOCIATES_CAMDEN"
AllPayers$VEND_FULL_NAME[AllPayers$VEND_FULL_NAME == "LOURDES MEDICAL ASSOCIATES" & AllPayers$CURR_PCP_CITY == "PENNSAUKEN"] <- "LOURDES MEDICAL ASSOCIATES_PENNSAUKEN"
AllPayers$VEND_FULL_NAME[AllPayers$VEND_FULL_NAME == "LOURDES MEDICAL ASSOCIATES PA" & AllPayers$CURR_PCP_CITY == "PENNSAUKEN"] <- "LOURDES MEDICAL ASSOCIATES_PENNSAUKEN"
AllPayers$VEND_FULL_NAME[AllPayers$VEND_FULL_NAME == "OSBORN FAMILY PRACTICE" & AllPayers$CURR_PCP_CITY == "CAMDEN"] <- "OSBORN FAMILY PRACTICE_CAMDEN"
AllPayers$VEND_FULL_NAME[AllPayers$VEND_FULL_NAME == "THE OSBORN FAMILY HEALTH CENTER" & AllPayers$CURR_PCP_CITY == "CAMDEN"] <- "OSBORN FAMILY PRACTICE_CAMDEN"
AllPayers$VEND_FULL_NAME[AllPayers$VEND_FULL_NAME == "THE OSBORN FAMILY HEALTH CENTER INC" & AllPayers$CURR_PCP_CITY == "CAMDEN"] <- "OSBORN FAMILY PRACTICE_CAMDEN"
AllPayers$VEND_FULL_NAME[AllPayers$VEND_FULL_NAME == "BROADWAY FAMILY PRACTICE" & AllPayers$CURR_PCP_CITY == "CAMDEN"] <- "RELIANCE BROADWAY_CAMDEN"
AllPayers$VEND_FULL_NAME[AllPayers$VEND_FULL_NAME == "BROADWAY FAMILY PRACTICE" & AllPayers$CURR_PCP_CITY == "PENNSAUKEN"] <- "RELIANCE BROADWAY_PENNSAUKEN"
#Maps to practices#
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "ACOSTA RAMON"] <- "Acosta"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "RELIANCE BROADWAY_CAMDEN"] <- "Reliance Broadway"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "NEW JERSEY MEDICAL AND HEALTH ASSOCIATES LLC"] <- "Reliance Broadway"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "RELIANCE MEDICAL GROUP"] <- "Reliance Broadway"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "RELIANCE MEDICAL GROUP LLC"] <- "Reliance Broadway"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "RELIANCE BROADWAY_PENNSAUKEN"] <- "Reliance Pennsauken"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "CAMCARE HEALTH CORPORATION"] <- "CAMcare"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "COOPER AMBULATORY PEDIATRICS"] <- "Cooper Pediatrics"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "COOPER FAMILY MEDICINE"] <- "Cooper Family"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "COOPER FAMILY MEDICINE PC"] <- "Cooper Family"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "COOPER PEDIATRICS"] <- "Cooper Pediatrics"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "COOPER HEALTH SYSTEM PEDIATRICS DEPARTMENT"] <- "Cooper Pediatrics"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "COOPER HEALTH SYSTEM - PEDIATRICS DEPARTMENT "] <- "Cooper Pediatrics"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "COOPER HEALTH SYSTEM PEDIATRICS DEPARTMENT"] <- "Cooper Pediatrics"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "COOPER HEALTH SYSTEM - PEDIATRICS DEPARTMENT"] <- "Cooper Pediatrics"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "COOPER PHYSICIANS OFFICES"] <- "Cooper IM"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "COOPER PHYSICIAN OFFICES PA"] <- "Cooper IM"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "Cooper_UHI_Nic"] <- "Cooper IM"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "JEFFREY A KLEEMAN DO"] <- "Fairview"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "LOURDES MEDICAL ASSOCIATES_CAMDEN"] <- "Osborn"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "LOURDES MEDICAL ASSOCIATES_PENNSAUKEN"] <- "Lourdes Pediatrics"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "OSBORN FAMILY PRACTICE_CAMDEN"] <- "Osborn"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "PROJECT HOPE"] <- "Project Hope"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "PROJECT HOPE HOMELESS PROGRAM"] <- "Project Hope"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "RIVER PRIMARY CARE CENTER"] <- "Reliance River"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "ST LUKE'S CATHOLIC MED SVCS"] <- "St. Lukes"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "ST LUKES CATHOLIC MEDICAL SERVICES INC"] <- "St. Lukes"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "ST LUKE'S CATHOLIC MED SVCS"] <- "St. Lukes"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "ST LUKE’S CATHOLIC MEDICAL SERVICES INC"] <- "St. Lukes"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "ST LUKE'S CATHOLIC MEDICAL SERVICES INC"] <- "St. Lukes"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "VIRTUA FAMILY MEDICINE-COOPER RIVER"] <- "Virtua"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "VIRTUA MEDICAL GROUP"] <- "Virtua"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "VIRTUA MEDICAL GROUP PA"] <- "Virtua"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "NELSON HOMER L"] <- "Broadway Community"
AllPayers$PRACTICE[AllPayers$VEND_FULL_NAME == "ERVILUS PATRICK"] <- "Broadway Community"
# Sets as dataframe
AllPayers <- as.data.frame(AllPayers)
# Removes fields that don't need to go to CareEvolution from the CareEvolution version of the file
AllPayers$MEMB_LANGUAGE <- NULL
AllPayers$MEMB_ETHNICITY <- NULL
# Adds last capitation
AllPayers$LastCapitationDate <- format(Sys.time(), "%m/01/%Y")
# Remove the leading "U" payer prefix (added earlier to United IDs) so the
# values match TrackVia Subscriber IDs.
# BUG FIX: the original gsub("U", "", ...) stripped EVERY "U" anywhere in the
# string -- including inside Horizon "H"-prefixed IDs -- not just the prefix.
AllPayers$SUBSCRIBER_ID <- sub("^U", "", AllPayers$SUBSCRIBER_ID)
# Function to check for missing data in a vector.
# Stops with an informative error when ANY element is NA; returns invisibly
# (NULL) when the vector is complete.
# BUG FIX: the original `if (is.na(x) == TRUE)` inspected only the first
# element of a vector (and a length > 1 condition is an error in R >= 4.2),
# so missing practices beyond the first row were never detected.
missing_practice <- function(x){
  if (any(is.na(x))) stop ('Dataframe includes missing practices. Fix these before sending to CareEvolution')
}
# Prints warning message if dataframe is missing practices
missing_practice(AllPayers$PRACTICE)
# Creates data frame with records a practice
# These need to be fixed before sending to Nick (!!!)
NO_PRACTICE <- subset(AllPayers, is.na(AllPayers$PRACTICE))
# Exports file for CareEvolution
# Includes those with no practice
write.csv(AllPayers, (file=paste(format(Sys.Date(), "%Y-%m-%d-"),"AllPayers", ".csv", sep="")), row.names=FALSE)
# Breakdown by practice: CSV file
library(dplyr)
AllPayers %>%
group_by(PRACTICE, Source) %>%
count() %>%
write.csv("practice_count.csv", row.names = F)
# PCP of NO_PRACTICE
NO_PRACTICE %>%
group_by(VEND_FULL_NAME, Source, CURR_PCP_CITY) %>%
count() %>%
write.csv("no_practice_count.csv", row.names = F)
|
# Strip a release suffix such as "-preview" from a Spark version string.
# The pattern anchors on the last run of digits (plus optional dot) that
# precedes the first non-digit, non-dot character, and keeps everything up to
# and including that run: "2.0.0-preview" -> "2.0.0". A version containing no
# such character (e.g. "2.1.0") does not match and is returned unchanged.
spark_version_clean <- function(version) {
gsub("([0-9]+\\.?)[^0-9\\.](.*)","\\1", version)
}
#' Version of Spark for a connection
#'
#' @param sc \code{spark_connection}
#'
#' @return A \code{\link{numeric_version}} object
#'
#' @export
spark_version <- function(sc) {
# ask the JVM SparkContext for its version string (e.g. "2.0.0-preview")
version <- invoke(spark_context(sc), "version")
# Get rid of -preview and other suffix variations
version <- spark_version_clean(version)
# return a numeric_version so callers can compare with <, >=, etc.
numeric_version(version)
}
spark_version_from_home_version <- function() {
  # Read the SPARK_HOME_VERSION override; an unset variable comes back as the
  # empty string, which is translated to NULL so callers can fall through to
  # the other detection strategies.
  version <- Sys.getenv("SPARK_HOME_VERSION")
  if (nzchar(version)) version else NULL
}
#' Version of Spark for a SPARK_HOME directory
#'
#' @param spark_home Path to SPARK_HOME
#' @param default The version to use as default
#'
#' @rdname spark_version
#'
#' @export
# Detect the Spark version for a SPARK_HOME directory by trying, in order:
# the RELEASE file, versioned assembly jar names, the SPARK_HOME_VERSION
# environment variable, and finally the caller-supplied default. The first
# strategy that yields a non-empty result wins; if none do, an error is raised.
spark_version_from_home <- function(spark_home, default = NULL) {
versionAttempts <- list(
# Strategy 1: parse the first line of $SPARK_HOME/RELEASE, which starts with
# "Spark <version> built ..."; strip the prefix and the " built..." tail.
useReleaseFile = function() {
versionedFile <- file.path(spark_home, "RELEASE")
if (file.exists(versionedFile)) {
releaseContents <- readLines(versionedFile)
if (!is.null(releaseContents) && length(releaseContents) > 0) {
gsub("Spark | built.*", "", releaseContents[[1]])
}
}
},
# Strategy 2: look for assembly/shuffle jars whose filenames embed the
# version, and extract it from the first matching file.
useAssemblies = function() {
candidateVersions <- list(
list(path = "lib", pattern = "spark-assembly-([0-9\\.]*)-hadoop.[0-9\\.]*\\.jar"),
list(path = "yarn", pattern = "spark-([0-9\\.]*)-preview-yarn-shuffle\\.jar")
)
# augment each candidate with the files in spark_home that match its pattern
candidateFiles <- lapply(candidateVersions, function(e) {
c(e,
list(
files = list.files(
file.path(spark_home, e$path),
pattern = e$pattern
)
)
)
})
filteredCandidates <- Filter(function(f) length(f$files) > 0, candidateFiles)
if (length(filteredCandidates) > 0) {
# first candidate with matches wins; pull the version capture group
valid <- filteredCandidates[[1]]
e <- regexec(valid$pattern, valid$files[[1]])
match <- regmatches(valid$files[[1]], e)
if (length(match) > 0 && length(match[[1]]) > 1) {
return(match[[1]][[2]])
}
}
},
# Strategy 3: honor an explicit SPARK_HOME_VERSION environment variable.
useEnvironmentVariable = function() {
spark_version_from_home_version()
},
# Strategy 4: fall back to the caller-supplied default (NULL means "none").
useDefault = function() {
default
}
)
# Run the strategies in order; each returns NULL (length 0) on failure, so
# the first non-empty result is cleaned of suffixes and returned.
for (versionAttempt in versionAttempts) {
result <- versionAttempt()
if (length(result) > 0)
return(spark_version_clean(result))
}
stop(
"Failed to detect version from SPARK_HOME or SPARK_HOME_VERSION. ",
"Try passing the spark version explicitly.")
}
| /R/spark_version.R | permissive | taeyoung-yoon/sparklyr | R | false | false | 2,568 | r | spark_version_clean <- function(version) {
gsub("([0-9]+\\.?)[^0-9\\.](.*)","\\1", version)
}
#' Version of Spark for a connection
#'
#' @param sc \code{spark_connection}
#'
#' @return A \code{\link{numeric_version}} object
#'
#' @export
spark_version <- function(sc) {
# Ask the backend SparkContext object for its version string.
version <- invoke(spark_context(sc), "version")
# Get rid of -preview and other suffix variations so the string parses.
version <- spark_version_clean(version)
# Coerce to numeric_version for reliable comparisons.
numeric_version(version)
}
spark_version_from_home_version <- function() {
# Read the SPARK_HOME_VERSION environment variable; an unset or empty
# value yields NULL so callers can try other detection methods.
version <- Sys.getenv("SPARK_HOME_VERSION")
if (nchar(version) <= 0) NULL else version
}
#' Version of Spark for a SPARK_HOME directory
#'
#' @param spark_home Path to SPARK_HOME
#' @param default The version to use as default
#'
#' @return The cleaned version string; errors if no detection strategy
#'   (RELEASE file, assembly jar names, SPARK_HOME_VERSION, \code{default})
#'   yields a version.
#'
#' @rdname spark_version
#'
#' @export
spark_version_from_home <- function(spark_home, default = NULL) {
# Detection strategies, tried in order until one returns a non-empty result.
versionAttempts <- list(
# Strategy 1: parse the first line of the RELEASE file; the gsub removes
# the leading "Spark " and everything from " built" on.
useReleaseFile = function() {
versionedFile <- file.path(spark_home, "RELEASE")
if (file.exists(versionedFile)) {
releaseContents <- readLines(versionedFile)
if (!is.null(releaseContents) && length(releaseContents) > 0) {
gsub("Spark | built.*", "", releaseContents[[1]])
}
}
},
# Strategy 2: extract the version from assembly/shuffle jar file names.
useAssemblies = function() {
candidateVersions <- list(
list(path = "lib", pattern = "spark-assembly-([0-9\\.]*)-hadoop.[0-9\\.]*\\.jar"),
list(path = "yarn", pattern = "spark-([0-9\\.]*)-preview-yarn-shuffle\\.jar")
)
# Attach the list of files matching each pattern to its candidate entry.
candidateFiles <- lapply(candidateVersions, function(e) {
c(e,
list(
files = list.files(
file.path(spark_home, e$path),
pattern = e$pattern
)
)
)
})
# Keep only candidates that matched at least one file.
filteredCandidates <- Filter(function(f) length(f$files) > 0, candidateFiles)
if (length(filteredCandidates) > 0) {
valid <- filteredCandidates[[1]]
# Capture group 1 of the pattern holds the version number.
e <- regexec(valid$pattern, valid$files[[1]])
match <- regmatches(valid$files[[1]], e)
if (length(match) > 0 && length(match[[1]]) > 1) {
return(match[[1]][[2]])
}
}
},
# Strategy 3: fall back to the SPARK_HOME_VERSION environment variable.
useEnvironmentVariable = function() {
spark_version_from_home_version()
},
# Strategy 4: the caller-supplied default (NULL means "keep trying").
useDefault = function() {
default
}
)
# Return the first non-empty result, cleaned of suffixes such as "-preview".
for (versionAttempt in versionAttempts) {
result <- versionAttempt()
if (length(result) > 0)
return(spark_version_clean(result))
}
stop(
"Failed to detect version from SPARK_HOME or SPARK_HOME_VERSION. ",
"Try passing the spark version explicitly.")
}
|
# context("phylomatic")
#
# taxa <- c("Poa annua", "Phlox diffusa", "Helianthus annuus")
#
# test_that("phylomatic - GET (default) method works", {
# skip_on_cran()
#
# tree <- phylomatic(taxa = taxa)
#
# expect_is(taxa, "character")
# expect_is(tree, "phylo")
# expect_is(tree, "phylomatic")
# })
#
#
# test_that("phylomatic - POST method works", {
# skip_on_cran()
#
# tree <- phylomatic(taxa = taxa, get = 'POST')
#
# expect_is(tree, "phylo")
# expect_is(tree, "phylomatic")
# })
#
# test_that("phylomatic - stored tree", {
# skip_on_cran()
#
# tree <- phylomatic(taxa, storedtree = 'smith2011')
#
# expect_is(tree, "phylo")
# expect_is(tree, "phylomatic")
# })
#
# test_that("phylomatic - nexml output format", {
# skip_on_cran()
#
# out <- phylomatic(taxa, outformat = "nexml")
#
# expect_is(out, "phylomatic")
# expect_is(out[1], "character")
# expect_true(grepl("nexml", out))
# })
#
# # FIXME: dryad url is broken, can't find new url
# # test_that("phylomatic - treeuri param", {
# # skip_on_cran()
#
# # spp <- c("Abies_nordmanniana", "Abies_bornmuelleriana", "Abies_cilicica", "Abies_cephalonica",
# # "Abies_numidica", "Abies_pinsapo", "Abies_alba")
# # url <- "http://datadryad.org/bitstream/handle/10255/dryad.8791/final_tree.tre?sequence=1"
# # tree <- phylomatic(taxa = spp, treeuri = url)
#
# # expect_is(tree, "phylo")
# # expect_is(tree, "phylomatic")
# # expect_equal(tree$tip.label, spp)
# # })
#
#
# test_that("phylomatic fails as expected", {
# skip_on_cran()
#
# # fails when no taxa given
# expect_error(phylomatic(), "argument \"taxa\" is missing")
#
# # fails when taxnames = FALSE and improper name strings passed
# expect_error(phylomatic(taxa, taxnames = FALSE), "No taxa in common")
#
# # fails when get isn't in allowed set
# expect_error(phylomatic(taxa, get = "STUFF"), "get must be one of 'POST' or 'GET'")
#
# # fails when too many taxa passed and get='GET'
# library("taxize")
# spp <- taxize::names_list("species", 200)
# expect_error(phylomatic(spp, get = "GET"), "\\(414\\) Request-URI Too Long")
# })
| /tests/testthat/test-phylomatic.R | permissive | ropensci/brranching | R | false | false | 2,108 | r | # context("phylomatic")
#
# taxa <- c("Poa annua", "Phlox diffusa", "Helianthus annuus")
#
# test_that("phylomatic - GET (default) method works", {
# skip_on_cran()
#
# tree <- phylomatic(taxa = taxa)
#
# expect_is(taxa, "character")
# expect_is(tree, "phylo")
# expect_is(tree, "phylomatic")
# })
#
#
# test_that("phylomatic - POST method works", {
# skip_on_cran()
#
# tree <- phylomatic(taxa = taxa, get = 'POST')
#
# expect_is(tree, "phylo")
# expect_is(tree, "phylomatic")
# })
#
# test_that("phylomatic - stored tree", {
# skip_on_cran()
#
# tree <- phylomatic(taxa, storedtree = 'smith2011')
#
# expect_is(tree, "phylo")
# expect_is(tree, "phylomatic")
# })
#
# test_that("phylomatic - nexml output format", {
# skip_on_cran()
#
# out <- phylomatic(taxa, outformat = "nexml")
#
# expect_is(out, "phylomatic")
# expect_is(out[1], "character")
# expect_true(grepl("nexml", out))
# })
#
# # FIXME: dryad url is broken, can't find new url
# # test_that("phylomatic - treeuri param", {
# # skip_on_cran()
#
# # spp <- c("Abies_nordmanniana", "Abies_bornmuelleriana", "Abies_cilicica", "Abies_cephalonica",
# # "Abies_numidica", "Abies_pinsapo", "Abies_alba")
# # url <- "http://datadryad.org/bitstream/handle/10255/dryad.8791/final_tree.tre?sequence=1"
# # tree <- phylomatic(taxa = spp, treeuri = url)
#
# # expect_is(tree, "phylo")
# # expect_is(tree, "phylomatic")
# # expect_equal(tree$tip.label, spp)
# # })
#
#
# test_that("phylomatic fails as expected", {
# skip_on_cran()
#
# # fails when no taxa given
# expect_error(phylomatic(), "argument \"taxa\" is missing")
#
# # fails when taxnames = FALSE and improper name strings passed
# expect_error(phylomatic(taxa, taxnames = FALSE), "No taxa in common")
#
# # fails when get isn't in allowed set
# expect_error(phylomatic(taxa, get = "STUFF"), "get must be one of 'POST' or 'GET'")
#
# # fails when too many taxa passed and get='GET'
# library("taxize")
# spp <- taxize::names_list("species", 200)
# expect_error(phylomatic(spp, get = "GET"), "\\(414\\) Request-URI Too Long")
# })
|
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
# Produces plot1.png (480x480) from household_power_consumption.txt.

#Store household_power_consumption.txt file path in filePath variable
filePath <- "./Data/household_power_consumption.txt"
# Read the semicolon-delimited data. The source dataset encodes missing
# values as "?", so declare them via na.strings; otherwise the numeric
# columns are read as character and as.numeric() emits coercion warnings.
data <- read.table(filePath, header = TRUE, sep = ";",
stringsAsFactors = FALSE, dec = ".", na.strings = "?")
# Keep only observations from 2007-02-01 and 2007-02-02
# (dates in the file are formatted d/m/yyyy).
subSetData <- data[data$Date %in% c("1/2/2007", "2/2/2007"), ]
# Global_active_power in kilowatts; as.numeric is kept as a safeguard
# against any stray non-numeric values.
globalActivePower <- as.numeric(subSetData$Global_active_power)
# Render to a 480x480 PNG device.
png("plot1.png", width = 480, height = 480)
hist(globalActivePower, col = "red", main = "Global Active Power",
xlab = "Global Active Power (kilowatts)")
# Close the device so the image is flushed to disk.
dev.off()
| /Plot1.R | no_license | ak51102/ExData_Plotting1 | R | false | false | 1,026 | r | #Store household_power_consumption.txt file path in filePath variable
filePath <- "./Data/household_power_consumption.txt"
#Read data from file
#1. Set header=TRUE -> Signifies that file has header
#2. Set sep=";" -> Signifies each column separated by ";"
#3. stringsAsFactors=FALSE -> read data as it is without any conversions
#4. dec="." -> Decimal separator for numeric values
data <- read.table(filePath, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
#Extract data between the dates 2007-02-01 and 2007-02-02
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
#Convert Global_active_power column data to number and store it in globalActivePower variable
globalActivePower <- as.numeric(subSetData$Global_active_power)
#Initiate device to plot histogram
png("plot1.png", width=480, height=480)
#Plot histogram
hist(globalActivePower, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
#Turn device off to ensure that histogram is written of on file device
dev.off()
|
context("missing")
# check_missing_colnames() should return its input untouched when none of
# the forbidden column names are present, and error (naming the offending
# columns) when any of them are.
test_that("missing", {
  dat <- data.frame(x = 1, y = 2, z = 0)
  expect_identical(check_missing_colnames(dat, "a"), dat)
  expect_error(
    check_missing_colnames(dat, c("y", "x", "a")),
    "data must not have columns 'x' and 'y'"
  )
})
| /tests/testthat/test-missing.R | permissive | cran/checkr | R | false | false | 255 | r | context("missing")
test_that("missing", {
data <- data.frame(x = 1, y = 2, z = 0)
expect_identical(check_missing_colnames(data, "a"), data)
expect_error(check_missing_colnames(data, c("y", "x", "a")), "data must not have columns 'x' and 'y'")
})
|
context("text_names")
# Tests for names() handling on text objects: creation, assignment,
# preservation through as_text(), equality checks, copy-on-modify
# semantics, and NA/duplicate names.
# NOTE(review): the description "`as_text` should not drop names" is used
# by two different test_that() blocks below; consider renaming one.
test_that("`names` should be NULL for new text", {
x <- as_text("ABC")
expect_equal(names(x), NULL)
})
test_that("`names<-` should work on text", {
x <- as_text(LETTERS)
names(x) <- rev(LETTERS)
expect_equal(names(x), rev(LETTERS))
})
test_that("`as_text` should not drop names", {
x <- as_text(c(a="1", b="2"))
expect_equal(names(x), c("a", "b"))
})
test_that("`all.equal` should test names", {
x <- as_text(1:3)
y <- x
names(y) <- c("a", "b", "c")
expect_equal(all.equal(x, y), "names for current but not for target")
expect_equal(all.equal(y, x), "names for target but not for current")
})
test_that("`as_text` should not drop names", {
x <- as_text(c(foo="hello"))
y <- as_text(x)
expect_equal(y, as_text(c(foo="hello")))
})
test_that("`as_text` should drop attributes", {
x <- as_text("hello")
attr(x, "foo") <- "bar"
y <- as_text(x)
expect_equal(y, as_text("hello"))
})
# Same attribute-dropping contract, but for text backed by an NDJSON file.
test_that("`as_text` should drop attributes for JSON objects", {
file <- tempfile()
writeLines('{"text": "hello"}', file)
x <- read_ndjson(file)$text
attr(x, "foo") <- "bar"
y <- as_text(x)
expect_equal(y, as_text("hello"))
})
test_that("`names<-` should not modify copies", {
x <- as_text(1:3)
y <- x
names(y) <- c("a", "b", "c")
expect_equal(names(x), NULL)
expect_equal(names(y), c("a", "b", "c"))
})
test_that("`names<-` should preserve attributes", {
x <- as_text(1:3)
attr(x, "foo") <- "bar"
names(x) <- c("a", "b", "c")
expect_equal(names(x), c("a", "b", "c"))
expect_equal(attr(x, "foo"), "bar")
})
test_that("`names<-` should allow NA", {
x <- as_text(1:3)
names(x) <- c("a", NA, "b")
expect_equal(names(x), c("a", NA, "b"))
})
test_that("`names<-` should allow duplicates", {
x <- as_text(1:3)
names(x) <- c("a", "b", "a")
expect_equal(names(x), c("a", "b", "a"))
})
| /tests/testthat/test-text_names.R | permissive | conjugateprior/r-corpus | R | false | false | 1,965 | r | context("text_names")
test_that("`names` should be NULL for new text", {
x <- as_text("ABC")
expect_equal(names(x), NULL)
})
test_that("`names<-` should work on text", {
x <- as_text(LETTERS)
names(x) <- rev(LETTERS)
expect_equal(names(x), rev(LETTERS))
})
test_that("`as_text` should not drop names", {
x <- as_text(c(a="1", b="2"))
expect_equal(names(x), c("a", "b"))
})
test_that("`all.equal` should test names", {
x <- as_text(1:3)
y <- x
names(y) <- c("a", "b", "c")
expect_equal(all.equal(x, y), "names for current but not for target")
expect_equal(all.equal(y, x), "names for target but not for current")
})
test_that("`as_text` should not drop names", {
x <- as_text(c(foo="hello"))
y <- as_text(x)
expect_equal(y, as_text(c(foo="hello")))
})
test_that("`as_text` should drop attributes", {
x <- as_text("hello")
attr(x, "foo") <- "bar"
y <- as_text(x)
expect_equal(y, as_text("hello"))
})
test_that("`as_text` should drop attributes for JSON objects", {
file <- tempfile()
writeLines('{"text": "hello"}', file)
x <- read_ndjson(file)$text
attr(x, "foo") <- "bar"
y <- as_text(x)
expect_equal(y, as_text("hello"))
})
test_that("`names<-` should not modify copies", {
x <- as_text(1:3)
y <- x
names(y) <- c("a", "b", "c")
expect_equal(names(x), NULL)
expect_equal(names(y), c("a", "b", "c"))
})
test_that("`names<-` should preserve attributes", {
x <- as_text(1:3)
attr(x, "foo") <- "bar"
names(x) <- c("a", "b", "c")
expect_equal(names(x), c("a", "b", "c"))
expect_equal(attr(x, "foo"), "bar")
})
test_that("`names<-` should allow NA", {
x <- as_text(1:3)
names(x) <- c("a", NA, "b")
expect_equal(names(x), c("a", NA, "b"))
})
test_that("`names<-` should allow duplicates", {
x <- as_text(1:3)
names(x) <- c("a", "b", "a")
expect_equal(names(x), c("a", "b", "a"))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/superset.R
\name{superspec.numeric}
\alias{superspec.numeric}
\title{Create Specification for Model Inputs and Outputs From Numeric}
\usage{
\method{superspec}{numeric}(x, ...)
}
\arguments{
\item{x}{numeric}
\item{...}{passed arguments}
}
\description{
Create a specification for the result of superset() from numeric by coercing to character.
}
\seealso{
Other superset: \code{\link{generalize}},
\code{\link{meta.character}}, \code{\link{meta.numeric}},
\code{\link{metaplot.character}},
\code{\link{metaplot.numeric}},
\code{\link{metaplot_character}},
\code{\link{metasuperset}}, \code{\link{meta}},
\code{\link{ninput.character}},
\code{\link{ninput.numeric}}, \code{\link{ninput}},
\code{\link{shuffle}}, \code{\link{superset.character}},
\code{\link{superset.numeric}}, \code{\link{superset}},
\code{\link{superspec.character}},
\code{\link{superspec}}
}
| /man/superspec.numeric.Rd | no_license | romainfrancois/nonmemica | R | false | true | 964 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/superset.R
\name{superspec.numeric}
\alias{superspec.numeric}
\title{Create Specification for Model Inputs and Outputs From Numeric}
\usage{
\method{superspec}{numeric}(x, ...)
}
\arguments{
\item{x}{numeric}
\item{...}{passed arguments}
}
\description{
Create a specification for the result of superset() from numeric by coercing to character.
}
\seealso{
Other superset: \code{\link{generalize}},
\code{\link{meta.character}}, \code{\link{meta.numeric}},
\code{\link{metaplot.character}},
\code{\link{metaplot.numeric}},
\code{\link{metaplot_character}},
\code{\link{metasuperset}}, \code{\link{meta}},
\code{\link{ninput.character}},
\code{\link{ninput.numeric}}, \code{\link{ninput}},
\code{\link{shuffle}}, \code{\link{superset.character}},
\code{\link{superset.numeric}}, \code{\link{superset}},
\code{\link{superspec.character}},
\code{\link{superspec}}
}
|
\name{demopat}
\encoding{latin1}
\alias{demopat}
\docType{data}
\title{Artificial Data Point Pattern from \code{spatstat} package.}
\description{
This is an artificial dataset, for use in testing and demonstrating compatibility between \code{spatstat} and \code{ads} objects. It is a multitype point pattern in an irregular polygonal window.
There are two types of points. The window contains a polygonal hole.
}
\usage{data(demopat)}
\format{
An object of class "ppp" representing a \code{spatstat} point pattern.
}
\source{
data(demopat) in \code{spatstat}
}
\examples{
data(demopat)
demo.spp<-ppp2spp(demopat)
plot(demo.spp)
}
\keyword{datasets}
| /ads/man/demopat.Rd | no_license | albrizre/spatstat.revdep | R | false | false | 655 | rd | \name{demopat}
\encoding{latin1}
\alias{demopat}
\docType{data}
\title{Artificial Data Point Pattern from \code{spatstat} package.}
\description{
This is an artificial dataset, for use in testing and demonstrating compatibility between \code{spatstat} and \code{ads} objects. It is a multitype point pattern in an irregular polygonal window.
There are two types of points. The window contains a polygonal hole.
}
\usage{data(demopat)}
\format{
An object of class "ppp" representing a \code{spatstat} point pattern.
}
\source{
data(demopat) in \code{spatstat}
}
\examples{
data(demopat)
demo.spp<-ppp2spp(demopat)
plot(demo.spp)
}
\keyword{datasets}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{collapseReplicates}
\alias{collapseReplicates}
\title{Collapse technical replicates in a SummarizedExperiment or DESeqDataSet}
\usage{
collapseReplicates(object, groupby, run, renameCols = TRUE)
}
\arguments{
\item{object}{A \code{SummarizedExperiment} or \code{DESeqDataSet}}
\item{groupby}{a grouping factor, as long as the columns of object}
\item{run}{optional, the names of each unique column in object. if provided,
a new column \code{runsCollapsed} will be added to the \code{colData}
which pastes together the names of \code{run}}
\item{renameCols}{whether to rename the columns of the returned object
using the levels of the grouping factor}
}
\value{
the \code{object} with as many columns as levels in \code{groupby}.
This object has assay/count data which is summed from the various
columns which are grouped together, and the \code{colData} is subset using
the first column for each group in \code{groupby}.
}
\description{
Collapses the columns in \code{object} by summing within levels
of a grouping factor \code{groupby}. The purpose of this function
is to sum up read counts from technical replicates to create an object
with a single column of read counts for each sample.
Optionally renames the columns of returned object with the levels of the
grouping factor.
Note: this function is written very simply and
can be easily altered to produce other behavior by examining the source code.
}
\examples{
dds <- makeExampleDESeqDataSet(m=12)
# make data with two technical replicates for three samples
dds$sample <- factor(sample(paste0("sample",rep(1:9, c(2,1,1,2,1,1,2,1,1)))))
dds$run <- paste0("run",1:12)
ddsColl <- collapseReplicates(dds, dds$sample, dds$run)
# examine the colData and column names of the collapsed data
colData(ddsColl)
colnames(ddsColl)
# check that the sum of the counts for "sample1" is the same
# as the counts in the "sample1" column in ddsColl
matchFirstLevel <- dds$sample == levels(dds$sample)[1]
stopifnot(all(rowSums(counts(dds[,matchFirstLevel])) == counts(ddsColl[,1])))
}
| /man/collapseReplicates.Rd | no_license | aghozlane/DESeq2shaman | R | false | false | 2,090 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{collapseReplicates}
\alias{collapseReplicates}
\title{Collapse technical replicates in a SummarizedExperiment or DESeqDataSet}
\usage{
collapseReplicates(object, groupby, run, renameCols = TRUE)
}
\arguments{
\item{object}{A \code{SummarizedExperiment} or \code{DESeqDataSet}}
\item{groupby}{a grouping factor, as long as the columns of object}
\item{run}{optional, the names of each unique column in object. if provided,
a new column \code{runsCollapsed} will be added to the \code{colData}
which pastes together the names of \code{run}}
\item{renameCols}{whether to rename the columns of the returned object
using the levels of the grouping factor}
}
\value{
the \code{object} with as many columns as levels in \code{groupby}.
This object has assay/count data which is summed from the various
columns which are grouped together, and the \code{colData} is subset using
the first column for each group in \code{groupby}.
}
\description{
Collapses the columns in \code{object} by summing within levels
of a grouping factor \code{groupby}. The purpose of this function
is to sum up read counts from technical replicates to create an object
with a single column of read counts for each sample.
Optionally renames the columns of returned object with the levels of the
grouping factor.
Note: this function is written very simply and
can be easily altered to produce other behavior by examining the source code.
}
\examples{
dds <- makeExampleDESeqDataSet(m=12)
# make data with two technical replicates for three samples
dds$sample <- factor(sample(paste0("sample",rep(1:9, c(2,1,1,2,1,1,2,1,1)))))
dds$run <- paste0("run",1:12)
ddsColl <- collapseReplicates(dds, dds$sample, dds$run)
# examine the colData and column names of the collapsed data
colData(ddsColl)
colnames(ddsColl)
# check that the sum of the counts for "sample1" is the same
# as the counts in the "sample1" column in ddsColl
matchFirstLevel <- dds$sample == levels(dds$sample)[1]
stopifnot(all(rowSums(counts(dds[,matchFirstLevel])) == counts(ddsColl[,1])))
}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{checkMetrics}
\alias{checkMetrics}
\title{Confirm that the metric functions are in suitable format}
\usage{
checkMetrics(x)
}
\arguments{
\item{x}{Optional named list of metric functions. Else, defines the metrics as those
defined in defineMetrics.}
}
\value{
A list of functions.
}
\description{
Utility function. Creates a list of functions, either those defined in defineMetrics
or a named list of metric functions.
}
\details{
A few quick checks to confirm the metric functions are input in suitable
format.
}
\examples{
library(geiger)
library(picante)
checkMetrics(defineMetrics())
}
\references{
Miller, Trisos and Farine.
}
| /man/checkMetrics.Rd | no_license | alexf4/metricTester | R | false | false | 693 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{checkMetrics}
\alias{checkMetrics}
\title{Confirm that the metric functions are in suitable format}
\usage{
checkMetrics(x)
}
\arguments{
\item{x}{Optional named list of metric functions. Else, defines the metrics as those
defined in defineMetrics.}
}
\value{
A list of functions.
}
\description{
Utility function. Creates a list of functions, either those defined in defineMetrics
or a named list of metric functions.
}
\details{
A few quick checks to confirm the metric functions are input in suitable
format.
}
\examples{
library(geiger)
library(picante)
checkMetrics(defineMetrics())
}
\references{
Miller, Trisos and Farine.
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/likert-package.R
\docType{package}
\name{likert-package}
\alias{likert-package}
\title{Likert Analysis and Visualization}
\description{
Likert Analysis and Visualization
}
\author{
\email{jason@bryer.org}
}
\keyword{institutional}
\keyword{likert}
\keyword{package}
\keyword{research}
| /man/likert-package.Rd | no_license | patilv/likert | R | false | false | 372 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/likert-package.R
\docType{package}
\name{likert-package}
\alias{likert-package}
\title{Likert Analysis and Visualization}
\description{
Likert Analysis and Visualization
}
\author{
\email{jason@bryer.org}
}
\keyword{institutional}
\keyword{likert}
\keyword{package}
\keyword{research}
|
# Dice simulation: behavior of the sum of five fair six-sided dice, and
# convergence of the sample mean as the number of experiments grows.
library(graphics)

# 5 random dice rolls
sample(1:6, 5, replace = TRUE)
# sum of 5 random dice rolls
sum(sample(1:6, 5, replace = TRUE))
# average of 5 random dice rolls
mean(sample(1:6, 5, replace = TRUE))

# One experiment: the sum of five independent fair dice rolls.
roll_five_sum <- function() {
  sum(sample(1:6, 5, replace = TRUE))
}

# What is the average of 10 experiments?
experiments10 <- replicate(10, roll_five_sum())
mean(experiments10)
print(experiments10)

# What is the average of 100 experiments?
experiments100 <- replicate(100, roll_five_sum())
mean(experiments100)
print(experiments100)

# What is the average of 1000 experiments? Plot the running mean below to
# see the value it converges to (theoretical expectation: 5 * 3.5 = 17.5).
experiments1000 <- replicate(1000, roll_five_sum())
mean(experiments1000)
print(experiments1000)

# Running mean after each experiment, computed in O(n) with cumsum()
# instead of calling mean() on each growing prefix (which is O(n^2)).
means1000 <- cumsum(experiments1000) / seq_along(experiments1000)

plot(seq_along(means1000), means1000, type = "l")
| /Labs/Dice_StoreLab/Dice.R | no_license | LeilaErbay/DataAnalytics | R | false | false | 1,179 | r | library(graphics)
# 5 random dice rolls
sample(1:6, 5, replace = T)
# sum of 5 random dice rolls
sum(sample(1:6, 5, replace = T))
# average of 5 random dice rolls
mean(sample(1:6, 5, replace = T))
# What is the average of 10 experiments?
experiments10 <- numeric(length = 10)
for(i in seq_along(experiments10)){
experiments10[i] <- sum(sample(1:6, 5, replace=T))
}
mean(experiments10)
print(experiments10)
# What is the average of 100 experiments?
experiments100 <- numeric(length = 100)
for(i in seq_along(experiments100)) {
experiments100[i] <- sum(sample(1:6, 5, replace=T))
}
mean(experiments100)
print(experiments100)
# What is the average of 1000 experiments? Plot below to see the number it converges
experiments1000 <- numeric(length = 1000)
for(i in seq_along(experiments1000)) {
experiments1000[i] <-sum(sample(1:6, 5, replace=T))
}
mean(experiments1000)
print(experiments1000)
means1000 <- numeric(length = 1000)
for(i in seq_along(experiments1000)) {
means1000[i] <- mean(experiments1000[1:i])
}
# print(means1000[1])
# means1000[3] <- mean(means1000[1:2])
# means1000 <- experiments1000[1:1000]
#
# means1000[222]
plot(1:1000, means1000, 'l')
|
#
# Server logic for this Shiny web application: renders a barplot of the
# WorldPhones data for whichever region the user selects. Run the app by
# clicking 'Run App' above.
#
# See http://shiny.rstudio.com/ for more on building Shiny applications.
#

library(shiny)

shinyServer(function(input, output) {
  # Re-rendered whenever input$region changes.
  output$plot <- renderPlot({
    barplot(WorldPhones[,input$region],
            main = as.character(input$region),
            xlab = "Year",
            ylab = "Number of Telephones")
  })
})
| /server.R | no_license | larui529/telephone | R | false | false | 531 | r | #
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define the server logic: render a barplot (not a histogram) of the
# WorldPhones column selected via input$region.
shinyServer(function(input, output) {
output$plot <- renderPlot({
barplot(WorldPhones[,input$region], xlab = "Year",
ylab = "Number of Telephones",
main = as.character(input$region))
})
|
% Generated by roxygen2 (4.1.0.9000): do not edit by hand
% Please edit documentation in R/searchCompanies.R
\name{searchCompanies}
\alias{searchCompanies}
\title{Search for Companies on LinkedIn}
\usage{
searchCompanies(token, keywords, location = NULL, industry = NULL)
}
\arguments{
\item{token}{Authorization token.}
\item{keywords}{A keyword used anywhere in a company's listing. Multiple words should be separated by a space.}
\item{location}{LinkedIn geography code, found here: \url{https://developer.linkedin.com/docs/reference/geography-codes}.}
\item{industry}{LinkedIn industry code, found here: \url{https://developer.linkedin.com/docs/reference/industry-codes}.}
}
\value{
Returns a list, information includes company id, company name, universal name, website, twitter handle, employee count, founded date, number of followers, and company description.
}
\description{
\code{searchCompanies} searches across LinkedIn's companies pages based on keywords, location, and industry.
}
\details{
In order to narrow the search down by location or industry, you must look up the proper input codes on the linkedin website. The geography codes can be found here: \url{https://developer.linkedin.com/docs/reference/geography-codes}, and the industry codes can be found here: \url{https://developer.linkedin.com/docs/reference/industry-codes}.
}
\examples{
\dontrun{
search.comp <- searchCompanies(in.auth, keywords = "LinkedIn")
}
}
\author{
Michael Piccirilli \email{michael.r.piccirilli@gmail.com}
}
\seealso{
\code{\link{getCompany}} \code{\link{searchJobs}}
}
| /man/searchCompanies.Rd | no_license | eliascabrera/Rlinkedin | R | false | false | 1,575 | rd | % Generated by roxygen2 (4.1.0.9000): do not edit by hand
% Please edit documentation in R/searchCompanies.R
\name{searchCompanies}
\alias{searchCompanies}
\title{Search for Companies on LinkedIn}
\usage{
searchCompanies(token, keywords, location = NULL, industry = NULL)
}
\arguments{
\item{token}{Authorization token.}
\item{keywords}{A keyword used anywhere in a company's listing. Multiple words should be separated by a space.}
\item{location}{LinkedIn geography code, found here: \url{https://developer.linkedin.com/docs/reference/geography-codes}.}
\item{industry}{LinkedIn industry code, found here: \url{https://developer.linkedin.com/docs/reference/industry-codes}.}
}
\value{
Returns a list, information includes company id, company name, universal name, website, twitter handle, employee count, founded date, number of followers, and company description.
}
\description{
\code{searchCompanies} searches across LinkedIn's companies pages based on keywords, location, and industry.
}
\details{
In order to narrow the search down by location or industry, you must look up the proper input codes on the linkedin website. The geography codes can be found here: \url{https://developer.linkedin.com/docs/reference/geography-codes}, and the industry codes can be found here: \url{https://developer.linkedin.com/docs/reference/industry-codes}.
}
\examples{
\dontrun{
search.comp <- searchCompanies(in.auth, keywords = "LinkedIn")
}
}
\author{
Michael Piccirilli \email{michael.r.piccirilli@gmail.com}
}
\seealso{
\code{\link{getCompany}} \code{\link{searchJobs}}
}
|
#' Read a CIFTI file quickly
#'
#' Read a CIFTI file by exporting it as a single GIFTI
#' using \code{-cifti-convert -to-gifti-ext} (\code{\link{read_cifti_flat}}),
#' and obtaining the brainordinate mapping using
#' \code{-cifti-export-dense-mapping} (\code{\link{info_cifti}}).
#'
#' @inheritSection Connectome_Workbench_Description Connectome Workbench Requirement
#'
#' @inheritParams cifti_fname_Param
#' @inheritParams surfL_fname_Param
#' @inheritParams surfR_fname_Param
#' @inheritParams brainstructures_Param_LR
#' @param mwall_values Values in the cortical data to interpret as the
#'  medial wall. Default: \code{c(NA, NaN)}.
#' @inheritParams wb_path_Param
#' @inheritParams verbose_Param_FALSE
#' @param ... Additional arguments to \code{read_cifti_flat}.
#'
#' @return A \code{"xifti"} object. See \code{\link{is.xifti}}.
#'
#' @keywords internal
#'
read_cifti_convert <- function(
  cifti_fname,
  surfL_fname=NULL, surfR_fname=NULL,
  brainstructures=c("left","right"),
  mwall_values=c(NA, NaN),
  wb_path=NULL, verbose=FALSE, ...){

  # Validate the requested brain structures; "all" expands to every structure.
  brainstructures <- match_input(
    brainstructures, c("left","right","subcortical","all"),
    user_value_label="brainstructures"
  )
  if ("all" %in% brainstructures) {
    brainstructures <- c("left","right","subcortical")
  }

  # Start from an empty "xifti" and fill in its data and metadata below.
  xifti <- template_xifti()

  # ----------------------------------------------------------------------------
  # Read files. ----------------------------------------------------------------
  # ----------------------------------------------------------------------------

  if (verbose) { exec_time <- Sys.time() }
  if (verbose) { cat("Reading CIFTI file.\n") }

  # Read the CIFTI metadata, and drop any requested brain structure that
  # the file does not actually contain (with a warning).
  xifti$meta <- info_cifti(cifti_fname, wb_path)
  bs_present <- brainstructures %in% xifti$meta$cifti$brainstructures
  if (!all(bs_present)) {
    warning(paste0(
      "Only the following brainstructures are present in the CIFTI file: ",
      paste(xifti$meta$cifti$brainstructures, collapse=", "), "\n"
    ))
    brainstructures <- brainstructures[bs_present]
  }

  # Read the CIFTI data as one flat matrix: rows are left cortex, then
  # right cortex, then subcortex.
  xifti_data <- read_cifti_flat(cifti_fname, wb_path=wb_path, ...)
  # dlabel files are expected to hold integer keys; store them as integers
  # when they really are whole numbers.
  if (get_cifti_extn(cifti_fname) == "dlabel.nii") {
    if (!all(round(xifti_data) == xifti_data)) {
      warning("The CIFTI file extension was \"dlabel.nii\" but the data values were not integers.")
    } else {
      mode(xifti_data) <- "integer"
    }
  }

  # Place cortex data into the \code{"xifti"} object. The medial wall masks
  # give the number of data rows belonging to each hemisphere.
  last_left <- sum(xifti$meta$cortex$medial_wall_mask$left)
  last_right <- last_left + sum(xifti$meta$cortex$medial_wall_mask$right)
  if ("left" %in% brainstructures) {
    # seq_len() (rather than 1:last_left) is safe if there are zero rows.
    # The trailing comma formerly after mwall_source passed an empty
    # argument to make_cortex; it has been removed.
    cortex <- make_cortex(
      xifti_data[seq_len(last_left),, drop=FALSE],
      side = "left",
      mwall = xifti$meta$cortex$medial_wall_mask$left,
      mwall_values=mwall_values,
      mwall_source="the CIFTI being read in"
    )
    xifti$data$cortex_left <- cortex$data
    xifti$meta$cortex$medial_wall_mask["left"] <- list(cortex$mwall)
  } else {
    xifti$meta$cortex$medial_wall_mask["left"] <- list(template_xifti()$meta$cortex$medial_wall_mask$left)
  }
  if ("right" %in% brainstructures) {
    cortex <- make_cortex(
      xifti_data[(1+last_left):last_right,, drop=FALSE],
      side = "right",
      mwall = xifti$meta$cortex$medial_wall_mask$right,
      mwall_values=mwall_values,
      mwall_source="the CIFTI being read in"
    )
    xifti$data$cortex_right <- cortex$data
    xifti$meta$cortex$medial_wall_mask["right"] <- list(cortex$mwall)
  } else {
    xifti$meta$cortex$medial_wall_mask["right"] <- list(template_xifti()$meta$cortex$medial_wall_mask$right)
  }

  # Place subcortical data into the \code{"xifti"} object, permuting the
  # rows by the rank of the labels (order(order(x)) yields ranks; the name
  # alpha_to_spatial suggests stored rows are alphabetical -- TODO confirm).
  if ("subcortical" %in% brainstructures) {
    alpha_to_spatial <- order(order(xifti$meta$subcort$labels))
    subcort_order <- c((1+last_right):nrow(xifti_data))[alpha_to_spatial]
    xifti$data$subcort <- xifti_data[subcort_order,, drop=FALSE]
  } else {
    xifti$meta$subcort <- template_xifti()$meta$subcort
  }

  # Read surfaces, if file names were provided.
  if (!is.null(surfL_fname) | !is.null(surfR_fname)) {
    if(verbose) { cat("...and surface(s).\n") }
  }
  if (!is.null(surfL_fname)) {
    xifti$surf$cortex_left <- make_surf(surfL_fname, "left")
  }
  if (!is.null(surfR_fname)) {
    xifti$surf$cortex_right <- make_surf(surfR_fname, "right")
  }

  # Finish: validate and tag the class before returning.
  if (!is.xifti(xifti)) { stop("The \"xifti\" object was invalid.") }

  if (verbose) {
    print(Sys.time() - exec_time)
    exec_time <- Sys.time()
  }

  structure(xifti, class="xifti")
} | /R/read_cifti_convert.R | no_license | yoaman/r-cran-ciftiTools | R | false | false | 4,589 | r | #' Read a CIFTI file quickly
#'
#' Read a CIFTI file by exporting it as a single GIFTI
#' using \code{-cifti-convert -to-gifti-ext} (\code{\link{read_cifti_flat}}),
#' and obtaining the brainordinate mapping using
#' \code{-cifti-export-dense-mapping} (\code{\link{info_cifti}}).
#'
#' @inheritSection Connectome_Workbench_Description Connectome Workbench Requirement
#'
#' @inheritParams cifti_fname_Param
#' @inheritParams surfL_fname_Param
#' @inheritParams surfR_fname_Param
#' @inheritParams brainstructures_Param_LR
#' @inheritParams wb_path_Param
#' @inheritParams verbose_Param_FALSE
#' @param mwall_values Values in the cortical data to interpret as the medial
#'   wall (passed to \code{make_cortex}). Default: \code{c(NA, NaN)}.
#' @param ... Additional arguments to \code{read_cifti_flat}.
#'
#' @return A \code{"xifti"} object. See \code{\link{is.xifti}}.
#'
#' @keywords internal
#'
read_cifti_convert <- function(
  cifti_fname,
  surfL_fname=NULL, surfR_fname=NULL,
  brainstructures=c("left","right"),
  mwall_values=c(NA, NaN),
  wb_path=NULL, verbose=FALSE, ...){

  # Check arguments.
  brainstructures <- match_input(
    brainstructures, c("left","right","subcortical","all"),
    user_value_label="brainstructures"
  )
  if ("all" %in% brainstructures) {
    brainstructures <- c("left","right","subcortical")
  }

  # Create the template.
  xifti <- template_xifti()

  # ----------------------------------------------------------------------------
  # Read files. ----------------------------------------------------------------
  # ----------------------------------------------------------------------------

  if (verbose) { exec_time <- Sys.time() }
  if (verbose) { cat("Reading CIFTI file.\n") }

  # Read the CIFTI metadata; it carries the brainordinate mapping needed to
  # split the flat data matrix back into brain structures.
  xifti$meta <- info_cifti(cifti_fname, wb_path)

  # Drop any requested brainstructures that the file does not contain.
  bs_present <- brainstructures %in% xifti$meta$cifti$brainstructures
  if (!all(bs_present)) {
    warning(paste0(
      "Only the following brainstructures are present in the CIFTI file: ",
      paste(xifti$meta$cifti$brainstructures, collapse=", "), "\n"
    ))
    brainstructures <- brainstructures[bs_present]
  }

  # Read the CIFTI data as one flat matrix: left cortex rows, then right
  # cortex rows, then subcortical rows.
  xifti_data <- read_cifti_flat(cifti_fname, wb_path=wb_path, ...)

  # Label files should hold integer keys; store them as such when possible.
  if (get_cifti_extn(cifti_fname) == "dlabel.nii") {
    if (!all(round(xifti_data) == xifti_data)) {
      warning("The CIFTI file extension was \"dlabel.nii\" but the data values were not integers.")
    } else {
      mode(xifti_data) <- "integer"
    }
  }

  # Row boundaries of each structure within `xifti_data`.
  last_left <- sum(xifti$meta$cortex$medial_wall_mask$left)
  last_right <- last_left + sum(xifti$meta$cortex$medial_wall_mask$right)

  # Place cortex data into the "xifti" object. `seq_len` is used instead of
  # `1:n` so an empty structure yields zero rows rather than an invalid
  # descending sequence.
  if ("left" %in% brainstructures) {
    cortex <- make_cortex(
      xifti_data[seq_len(last_left),, drop=FALSE],
      side = "left",
      mwall = xifti$meta$cortex$medial_wall_mask$left,
      mwall_values=mwall_values,
      mwall_source="the CIFTI being read in"
    )
    xifti$data$cortex_left <- cortex$data
    xifti$meta$cortex$medial_wall_mask["left"] <- list(cortex$mwall)
  } else {
    xifti$meta$cortex$medial_wall_mask["left"] <- list(template_xifti()$meta$cortex$medial_wall_mask$left)
  }
  if ("right" %in% brainstructures) {
    cortex <- make_cortex(
      xifti_data[last_left + seq_len(last_right - last_left),, drop=FALSE],
      side = "right",
      mwall = xifti$meta$cortex$medial_wall_mask$right,
      mwall_values=mwall_values,
      mwall_source="the CIFTI being read in"
    )
    xifti$data$cortex_right <- cortex$data
    xifti$meta$cortex$medial_wall_mask["right"] <- list(cortex$mwall)
  } else {
    xifti$meta$cortex$medial_wall_mask["right"] <- list(template_xifti()$meta$cortex$medial_wall_mask$right)
  }

  # Place subcortical data into the "xifti" object. The flat rows appear to be
  # in alphabetical label order (per the variable name `alpha_to_spatial`);
  # `order(order(x))` is the inverse permutation (i.e. the ranks), restoring
  # spatial order.
  if ("subcortical" %in% brainstructures) {
    alpha_to_spatial <- order(order(xifti$meta$subcort$labels))
    subcort_order <- (last_right + seq_len(nrow(xifti_data) - last_right))[alpha_to_spatial]
    xifti$data$subcort <- xifti_data[subcort_order,, drop=FALSE]
  } else {
    xifti$meta$subcort <- template_xifti()$meta$subcort
  }

  # Read surfaces, if any were given.
  if (!is.null(surfL_fname) || !is.null(surfR_fname)) {
    if (verbose) { cat("...and surface(s).\n") }
  }
  if (!is.null(surfL_fname)) {
    xifti$surf$cortex_left <- make_surf(surfL_fname, "left")
  }
  if (!is.null(surfR_fname)) {
    xifti$surf$cortex_right <- make_surf(surfR_fname, "right")
  }

  # Finish: validate and class the result.
  if (!is.xifti(xifti)) { stop("The \"xifti\" object was invalid.") }

  if (verbose) {
    print(Sys.time() - exec_time)
    exec_time <- Sys.time()
  }

  structure(xifti, class="xifti")
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.